Dataset columns (name, type, length statistics):

    query             string     lengths 9 to 9.05k
    document          string     lengths 10 to 222k
    metadata          dict
    negatives         sequence   length 30
    negative_scores   sequence   length 30
    document_score    string     lengths 4 to 10
    document_rank     string     2 distinct values
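For orientation, each row below lists its fields in the same order as the schema (query, document, metadata, negatives, negative_scores, document_score, document_rank), one field per block. The sketch below is only an illustration of that shape; the values are abbreviated placeholders taken from the first row, not a literal record.

    # Illustrative shape of one record; values abbreviated, field names as in the schema.
    record = {
        "query": "Checks if two shards overlap.",
        "document": "def _check_shard_metadata_pair_overlap(shard1, shard2): ...",
        "metadata": {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
        "negatives": ["def can_overlap(self): ...", "..."],   # 30 code snippets
        "negative_scores": ["0.72638744", "..."],             # 30 scores, aligned with negatives
        "document_score": "0.7538921",
        "document_rank": "0",
    }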
Checks if two shards overlap.
def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):
    # For each dim of each shard, check if one shard resides on the other
    # end of second shard with respect to that dim. As an example for a 2D
    # shard, we would check if one shard is above or on the left of the
    # other shard.
    ndims = len(shard1.shard_offsets)
    for i in range(ndims):
        if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:
            return False
        if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_overlap(self):\n return False", "def overlaps(self, other): # -> bool:\n ...", "def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):\n # TODO: evaluate optimizing this if needed.\n for i in range(len(shards)):\n for j in range(i + 1, len(shards)):\n if _check_shard_metadata_pair_overlap(shards[i], shards[j]):\n raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n first[5] == second[5],\n first[6] == second[6]]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[5] - second[5]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n abs(first[3] - second[3]) <= th]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n\n if all(map(lambda x: abs(x[0] - x[1]) <= th, zip(first, second))):\n return True\n else:\n 
return False", "def do_overlap(r1, r2):\n r1_s, r1_e = r1\n r2_s, r2_e = r2\n\n return r1_s <= r2_s <= r1_e or r2_s <= r1_s <= r2_e", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def doesNotOverlap( self, other):\n return not self.overlaps( other)", "def overlaps(self, other):\n return (self.right > other.left and self.left < other.right and\n self.top < other.bottom and self.bottom > other.top)", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def overlap(indices1, indices2):\n assert (len(indices1) == 2 and len(indices2) == 2)\n indices1 = sorted(indices1)\n indices2 = sorted(indices2)\n if (indices2[0] <= indices1[0] <= indices2[1]) or \\\n (indices2[0] <= indices1[1] <= indices2[1]) or \\\n (indices1[0] <= indices2[0] <= indices1[1]) or \\\n (indices1[0] <= indices2[1] <= indices1[1]):\n return True\n else:\n return False", "def have_overlap(self,\n entry1: Union[Annotation, int],\n entry2: Union[Annotation, int]) -> bool:\n entry1_: Annotation = self._entry_index[\n entry1] if isinstance(entry1, (int, np.integer)) else entry1\n entry2_: Annotation = self._entry_index[\n entry2] if isinstance(entry2, (int, np.integer)) else entry1\n\n if not isinstance(entry1_, Annotation):\n raise TypeError(f\"'entry1' should be an instance of Annotation,\"\n f\" but get {type(entry1)}\")\n\n if not isinstance(entry2_, Annotation):\n raise TypeError(f\"'entry2' should be an instance of Annotation,\"\n f\" but get {type(entry2)}\")\n\n return not (entry1_.span.begin >= entry2_.span.end or\n entry1_.span.end <= entry2_.span.begin)", "def overlaps(self, other):\n pass", "def overlaps(self, other):\n\n if self.start.equal(other.start) or self.stop.equal(other.stop):\n return True\n elif self.start.before(other.start) and self.stop.after(other.start):\n return True\n elif other.stop.after(self.start) and other.stop.before(self.stop):\n return True\n else:\n return False", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def is_overlap(self, transposon):\n if self.first <= transposon.last <= self.last:\n return True\n elif self.first <= transposon.first <= self.last:\n return True\n else:\n return False", "def can_overlap(self):\n return self.is_open" ]
[ "0.72638744", "0.7062225", "0.7060286", "0.69966316", "0.6907729", "0.68349934", "0.6770295", "0.6767552", "0.67658687", "0.6728309", "0.66423035", "0.6638168", "0.66367257", "0.6632385", "0.66321814", "0.6605467", "0.6596014", "0.65437245", "0.65411097", "0.6536112", "0.6519489", "0.6512231", "0.6496759", "0.6479603", "0.64673066", "0.6425286", "0.6416054", "0.6410895", "0.6409679", "0.637295" ]
0.7538921
0
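Each row's metadata field declares a triplet objective over (query, document, negatives). A minimal training sketch under that reading follows; it assumes the rows have already been parsed into a list of dicts named rows, and the choice of the sentence-transformers library and the all-MiniLM-L6-v2 base model are illustrative assumptions, not something specified by the dataset.

    from sentence_transformers import SentenceTransformer, InputExample, losses
    from torch.utils.data import DataLoader

    # One anchor/positive/negative triple per (query, document, negative) combination.
    examples = [
        InputExample(texts=[row["query"], row["document"], neg])
        for row in rows                      # `rows` is assumed to hold the parsed records
        for neg in row["negatives"]
    ]

    model = SentenceTransformer("all-MiniLM-L6-v2")   # assumed base model
    loader = DataLoader(examples, shuffle=True, batch_size=16)
    loss = losses.TripletLoss(model=model)
    model.fit(train_objectives=[(loader, loss)], epochs=1)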
Ensures none of the shards overlap with each other.
def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):
    # TODO: evaluate optimizing this if needed.
    for i in range(len(shards)):
        for j in range(i + 1, len(shards)):
            if _check_shard_metadata_pair_overlap(shards[i], shards[j]):
                raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_overlap(self):\n return False", "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:\n return False\n\n return True", "def overlaps(self, other): # -> bool:\n ...", "def load_overlapping_shards():\n while not event_heap and shards:\n # Try to pull events from unread shards.\n load_next_shards(shards[0].cmp_id)\n\n if event_heap and shards:\n # Pull events from all shards that overlap with the next event to be\n # yielded.\n load_next_shards(event_heap[0].id)\n elif not iterators:\n # No events in the heap and no active iterators? We're done!\n return\n\n shards_with_events = set(event.stream_shard for event in event_heap)\n for shard in iterators.keys():\n if shard in shards_with_events:\n continue\n try:\n it = iterators[shard]\n event = it.next()\n heapq.heappush(event_heap, event)\n except StopIteration:\n del iterators[shard]", "def check_collisions(self):", "def sstable_marking_test_not_intersecting_all_ranges(self):\n cluster = self.cluster\n cluster.populate(4).start(wait_for_binary_proto=True)\n node1, node2, node3, node4 = cluster.nodelist()\n\n debug(\"Inserting data with stress\")\n node1.stress(['write', 'n=3', 'no-warmup', '-rate', 'threads=1', '-schema', 'replication(factor=3)'])\n\n debug(\"Flushing nodes\")\n cluster.flush()\n\n repair_options = '' if self.cluster.version() >= '2.2' else '-inc -par'\n\n debug(\"Repairing node 1\")\n node1.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 2\")\n node2.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 3\")\n node3.nodetool(\"repair {}\".format(repair_options))\n debug(\"Repairing node 4\")\n node4.nodetool(\"repair {}\".format(repair_options))\n\n for out in (node.run_sstablemetadata(keyspace='keyspace1').stdout for node in cluster.nodelist() if len(node.get_sstables('keyspace1', 'standard1')) > 0):\n self.assertNotIn('Repaired at: 0', out)", "def overlap_with(self, other):", "def _assert_no_scope_overlap(children) -> None: # noqa: ANN001\n for c0, c1 in itertools.combinations(children, 2):\n if set(c0.scope) & set(c1.scope):\n raise OverlappingScopesException(\n \"Children {} and {} have overlapping scopes\".format(c0, c1)\n )", "def overlaps(self, other):\n pass", "def doesNotOverlap( self, other):\n return not self.overlaps( other)", "def conflict_check() ->None:\r\n global conflict_space\r\n conflict_space = np.zeros(mShape)\r\n for x in range(shape):\r\n for y in range(shape):\r\n for z in range(y+1, shape):\r\n if example[x, y] == example[x, z]:\r\n conflict_space[x, y] = example[x, y]\r\n conflict_space[x, z] = example[x, z]\r\n if example[y, x] == example[z, x]:\r\n conflict_space[y, x] = example[y, x]\r\n conflict_space[z, x] = example[z, x]", "def validate_collision(self):\n pass", "def overlap_conflict(out, *inputs):\n from . 
import _bh\n\n for i in inputs:\n if not np.isscalar(i):\n if np.may_share_memory(out, i) and not _bh.same_view(out, i):\n return True\n return False", "def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def _validate_no_overlap(params, error_callback):\n dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],\n params['dhcp_end']))\n inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],\n params['inspection_end']))\n # If there is any intersection of the two sets then we have a problem\n if dhcp_set & inspection_set:\n message = ('Inspection DHCP range \"%s-%s\" overlaps provisioning '\n 'DHCP range \"%s-%s\".' %\n (params['inspection_start'], params['inspection_end'],\n params['dhcp_start'], params['dhcp_end']))\n error_callback(message)", "def scaffold_overlap(indices_1: Set[int],\n indices_2: Set[int],\n index_to_scaffold: Dict[int, str]) -> float:\n scaffolds_1 = {index_to_scaffold[index] for index in indices_1}\n indices_in_2_with_scaffold_in_1 = {index for index in indices_2 if index_to_scaffold[index] in scaffolds_1}\n overlap = len(indices_in_2_with_scaffold_in_1) / len(indices_2)\n\n return overlap", "def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n # rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def is_colliding(network, allocations):\n for allocation in allocations:\n if network.overlaps(allocation):\n return True\n return False", "def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True", "def collision_check(self):\n return True", "def check_collisions(self):\n for tail in self.tail:\n if tail.position == self.head.position:\n self.die()\n\n future_pos = Position(self.head_x + self.direction.move_x * Const.SQUARE_SIZE,\n self.head_y + self.direction.move_y * Const.SQUARE_SIZE)\n\n if future_pos.x < 0 or future_pos.x > Const.G_B_W - Const.SQUARE_SIZE or \\\n future_pos.y < 0 or future_pos.y > Const.G_B_H - Const.SQUARE_SIZE:\n self.die()", 
"def test_overlapping_alignments_2():\n generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix)\n gqd.gene_wise_quantification._min_overlap = 5\n sam = pysam.Samfile(gqd.sam_bam_prefix + \".bam\")\n # 1 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 10))) == []\n # 4 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 13))) == []\n # 5 overlapping base in the 5' end of the reads => okay\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 14))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]\n # 1 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 19, 23))) == []\n # 4 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 16, 23))) == []\n # 5 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 15, 23))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def check_consistent(self):\n # * END LIST The end list itself must be consistent.\n # ** Each end must be of understood type\n # ** Each end must have a valid sequence or no sequence\n # ** There must be no more than one instance of each name\n # ** WARN if there are ends with no namecounts\n # * TILE LIST\n # ** each tile must be of understood type (must parse)\n # ** ends in the tile list must be consistent (must merge)\n # ** there must be no more than one tile with each name\n # self.tiles.check_consistent()\n endsfromtiles = self.tiles.glues_from_tiles()\n\n # ** WARN if any end that appears does not have a complement used or vice versa\n # ** WARN if there are tiles with no name\n # * TILE + END\n # ** The tile and end lists must merge validly\n # (checks sequences, adjacents, types, complements)\n self.glues | endsfromtiles\n\n # ** WARN if tilelist has end references not in ends\n # ** WARN if merge is not equal to the endlist\n # ** WARN if endlist has ends not used in tilelist\n # * ADAPTERS / SEEDS\n # SEED stuff was here", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def decrease_overlap(indices_1: Set[int],\n indices_2: Set[int],\n index_to_scaffold: Dict[int, str],\n scaffold_to_indices: Dict[str, Set[int]],\n indices_1_size: float) -> Tuple[Set[int], Set[int]]:\n # Make copies to prevent altering input set\n indices_1 
= deepcopy(indices_1)\n indices_2 = deepcopy(indices_2)\n\n # Determine scaffolds in each of the two sets\n scaffolds_1 = {index_to_scaffold[index] for index in indices_1}\n scaffolds_2 = {index_to_scaffold[index] for index in indices_2}\n union = scaffolds_1 | scaffolds_2\n intersection = scaffolds_1 & scaffolds_2\n\n # Return indices in cases when overlap can't be changed\n if len(union) <= 1 or len(intersection) == 0:\n return indices_1, indices_2\n\n # If only one scaffold in intersection, randomly choose which set to move it to\n if len(intersection) == 1:\n scaffold = intersection.pop()\n indices = scaffold_to_indices[scaffold]\n\n indices_1 -= indices\n indices_2 -= indices\n\n indices_set = random.choice([indices_1, indices_2])\n indices_set |= indices\n\n return indices_1, indices_2\n\n # Select random scaffold and move all indices to indices_2\n scaffold_to_2 = random.choice(sorted(list(intersection)))\n indices_to_2 = scaffold_to_indices[scaffold_to_2]\n indices_1 -= indices_to_2\n indices_2 |= indices_to_2\n intersection.remove(scaffold_to_2)\n\n # Select scaffold which is closest in size to above scaffold\n scaffold_to_2_length = len(indices_to_2)\n best_size_diff = float('inf')\n best_scaffold = None\n\n # Guarantee consistent randomness\n intersection = sorted(list(intersection))\n random.shuffle(intersection)\n\n for scaffold in intersection:\n scaffold_to_1_length = len(scaffold_to_indices[scaffold])\n size_diff = abs(scaffold_to_1_length / (scaffold_to_1_length + scaffold_to_2_length) - indices_1_size)\n\n if size_diff < best_size_diff:\n best_size_diff = size_diff\n best_scaffold = scaffold\n\n # Move all indices of this scaffold to indices_1\n indices = scaffold_to_indices[best_scaffold]\n indices_2 -= indices\n indices_1 |= indices\n\n return indices_1, indices_2" ]
[ "0.6780764", "0.66072226", "0.6300442", "0.62059945", "0.62039727", "0.618798", "0.6161772", "0.61248296", "0.60674876", "0.60540277", "0.59775466", "0.59660643", "0.5925951", "0.59135264", "0.58842605", "0.58602405", "0.5835803", "0.5722045", "0.5703409", "0.57028174", "0.56958723", "0.5679249", "0.56744343", "0.5671604", "0.56381536", "0.5625889", "0.56201446", "0.5612965", "0.55906594", "0.55785084" ]
0.7294196
0
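The two shard helpers in the rows above can be exercised with a minimal stand-in for ShardMetadata. The dataclass below is an assumption that mirrors only the two fields the functions read (shard_offsets and shard_lengths), not the real torch.distributed class.

    from dataclasses import dataclass
    from typing import List

    # Minimal stand-in with just the fields the helpers read (an assumption,
    # not the real ShardMetadata class).
    @dataclass
    class ShardMetadata:
        shard_offsets: List[int]
        shard_lengths: List[int]

    top = ShardMetadata(shard_offsets=[0, 0], shard_lengths=[2, 4])          # rows 0-1
    bottom = ShardMetadata(shard_offsets=[2, 0], shard_lengths=[2, 4])       # rows 2-3
    overlapping = ShardMetadata(shard_offsets=[1, 0], shard_lengths=[2, 4])  # rows 1-2

    print(_check_shard_metadata_pair_overlap(top, bottom))        # False: they only touch at the boundary
    validate_non_overlapping_shards_metadata([top, bottom])       # passes silently
    validate_non_overlapping_shards_metadata([top, overlapping])  # raises ValueError: shards overlap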
Checks if the shards_metadata is compatible with the provided tensor dims.
def check_tensor(shards_metadata, tensor_dims) -> None:
    # If the tensor's volume matches the total volume of all shards and
    # all shard boundaries are within tensor dims, we have a compatible
    # sharding spec for this tensor. Note that we have already verified
    # we don't have overlapping shards.
    tensor_rank = len(tensor_dims)
    shards_rank = len(shards_metadata[0].shard_offsets)
    if tensor_rank != shards_rank:
        raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')

    total_shard_volume = 0
    for shard in shards_metadata:
        shard_volume = 1
        for i, shard_length in enumerate(shard.shard_lengths):
            shard_volume *= shard_length
            if shard.shard_offsets[i] + shard.shard_lengths[i] > tensor_dims[i]:
                raise ValueError(
                    f'Shard offset {shard.shard_offsets[i]} and length '
                    f'{shard.shard_lengths[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')
        total_shard_volume += shard_volume

    tensor_volume = 1
    for size in tensor_dims:
        tensor_volume *= size

    if total_shard_volume != tensor_volume:
        # TODO: Can we improve this error message to point out the gaps?
        raise ValueError(
            f'Total volume of shards: {total_shard_volume} '
            f'does not match tensor volume: {tensor_volume}, in other words '
            f'all the individual shards do not cover the entire tensor')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def has_dimension(self, dim):\n\n return self.units.dimensions == dim", "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left of the\n # other shard.\n ndims = len(shard1.shard_offsets)\n for i in range(ndims):\n if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:\n return False\n if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:\n return False\n\n return True", "def _check_dim_array(array, ndim):\n # enlist the number of expected dimensions\n if isinstance(ndim, int):\n ndim = [ndim]\n\n # check the number of dimensions of the array\n if array.ndim not in ndim:\n raise ValueError(\"Array can't have {0} dimension(s). 
Expected \"\n \"dimensions are: {1}.\".format(array.ndim, ndim))", "def _metadata_is_consistent(metadata):\n checks = []\n required = ('version', 'fields', 'size', 'width', 'height', 'points',\n 'viewpoint', 'data')\n for f in required:\n if f not in metadata:\n print('%s required' % f)\n checks.append((lambda m: all([k in m for k in required]),\n 'missing field'))\n checks.append((lambda m: len(m['type']) == len(m['count']) ==\n len(m['fields']),\n 'length of type, count and fields must be equal'))\n checks.append((lambda m: m['height'] > 0,\n 'height must be greater than 0'))\n checks.append((lambda m: m['width'] > 0,\n 'width must be greater than 0'))\n checks.append((lambda m: m['points'] > 0,\n 'points must be greater than 0'))\n checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',\n 'binary_compressed'),\n 'unknown data type:'\n 'should be ascii/binary/binary_compressed'))\n ok = True\n for check, msg in checks:\n if not check(metadata):\n print('error:', msg)\n ok = False\n return ok", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def _check_dims(cls, values):\n ndim = values['ndim']\n\n # Check the range tuple has same number of elements as ndim\n if len(values['range']) < ndim:\n values['range'] = ((0, 2, 1),) * (\n ndim - len(values['range'])\n ) + values['range']\n elif len(values['range']) > ndim:\n values['range'] = values['range'][-ndim:]\n\n # Check the current step tuple has same number of elements as ndim\n if len(values['current_step']) < ndim:\n values['current_step'] = (0,) * (\n ndim - len(values['current_step'])\n ) + values['current_step']\n elif len(values['current_step']) > ndim:\n values['current_step'] = values['current_step'][-ndim:]\n\n # Check the order tuple has same number of elements as ndim\n if len(values['order']) < ndim:\n values['order'] = tuple(\n range(ndim - len(values['order']))\n ) + tuple(o + ndim - len(values['order']) for o in values['order'])\n elif len(values['order']) > ndim:\n values['order'] = reorder_after_dim_reduction(\n values['order'][-ndim:]\n )\n\n # Check the order is a permutation of 0, ..., ndim - 1\n if not set(values['order']) == set(range(ndim)):\n raise ValueError(\n trans._(\n \"Invalid ordering {order} for {ndim} dimensions\",\n deferred=True,\n order=values['order'],\n ndim=ndim,\n )\n )\n\n # Check the axis labels tuple has same number of elements as ndim\n if len(values['axis_labels']) < ndim:\n # Append new \"default\" 
labels to existing ones\n if values['axis_labels'] == tuple(\n map(str, range(len(values['axis_labels'])))\n ):\n values['axis_labels'] = tuple(map(str, range(ndim)))\n else:\n values['axis_labels'] = (\n tuple(map(str, range(ndim - len(values['axis_labels']))))\n + values['axis_labels']\n )\n elif len(values['axis_labels']) > ndim:\n values['axis_labels'] = values['axis_labels'][-ndim:]\n\n return values", "def check_dimension(dim, meta, trace=False):\n if dim == \"..\":\n meta[\"dimension\"] = declast.AssumedRank()\n meta[\"assumed-rank\"] = True\n else:\n meta[\"dimension\"] = declast.ExprParser(dim, trace=trace).dimension_shape()", "def is_dimension_dynamic(dim) -> bool:\n return dim is None or dim <= 0", "def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False", "def _test_obsmdsize(t):\n md = t.metadata(axis='observation')\n return t.shape[0] != len(md) if md is not None else False", "def _assert_float32(tensors):\n if not isinstance(tensors, dict):\n tensors = [tensors]\n else:\n tensors = tensors.values()\n for tensor in tensors:\n if tensor.dtype.base_dtype != dtypes.float32:\n raise TypeError('Expected dtype=float32, %s.' % tensor)", "def _verify_space(self) -> None:\n\n for dimension in self.space.values():\n\n if dimension.type != \"fidelity\" and dimension.prior_name not in [\n \"uniform\",\n \"reciprocal\",\n \"int_uniform\",\n \"int_reciprocal\",\n \"choices\",\n ]:\n raise ValueError(\n \"TPE now only supports uniform, loguniform, uniform discrete \"\n f\"and choices as prior: {dimension.prior_name}\"\n )\n\n shape = dimension.shape\n if shape and len(shape) != 1:\n raise ValueError(\"TPE now only supports 1D shape.\")", "def is_tensor_spec(self) -> bool:\n return self.inputs and isinstance(self.inputs[0], TensorSpec)", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def _is_tensor_equal(input_tensor, cache_tensor):\n if input_tensor.dtype != cache_tensor.dtype:\n return False\n\n if input_tensor.shape != cache_tensor.shape:\n return False\n\n if len(input_tensor.shape) != len(cache_tensor.shape):\n return False\n\n return True", "def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):\n # TODO: evaluate optimizing this if needed.\n for i in range(len(shards)):\n for j in range(i + 1, len(shards)):\n if _check_shard_metadata_pair_overlap(shards[i], shards[j]):\n raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')", "def validate_dimensions(self, dimensions):\n\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints {dimensions}\")\n\n if not all(elem > 0 for elem in dimensions):\n raise ValueError(f\"Dimensions must be greater than 1 {dimensions}\")\n\n if not checkallequal(dimensions):\n raise ValueError(f\"Not all dimensions are equal {dimensions}. They \"\n f\"must be equal. 
This will be changed in a future version\")", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def validate_dimensions(self, dimensions):\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints {dimensions}\")", "def HasTensor(tensor):\n return _C.HasTensor(_stringify_tensor(tensor))", "def check_dimensionality(quantity, compatible_units):\n if unit.is_quantity(compatible_units) or unit.is_unit(compatible_units):\n try:\n from simtk.unit.quantity import is_dimensionless\n except ModuleNotFoundError:\n from openmm.unit.quantity import is_dimensionless\n if not is_dimensionless(quantity / compatible_units):\n raise ValueError('{} does not have units compatible with expected {}'.format(quantity, compatible_units))\n elif compatible_units == float:\n if not (isinstance(quantity, float) or isinstance(quantity, np.ndarray)):\n raise ValueError(\"'{}' expected to be a float, but was instead {}\".format(quantity, type(quantity)))\n else:\n raise ValueError(\"Don't know how to handle compatible_units of {}\".format(compatible_units))\n\n # Units are compatible if they pass this point\n return True" ]
[ "0.70428336", "0.6533978", "0.6469752", "0.6321749", "0.6268698", "0.6185657", "0.6185021", "0.605668", "0.5926702", "0.5918645", "0.5908207", "0.58480775", "0.57827824", "0.5772328", "0.5762425", "0.57193136", "0.57082486", "0.5685897", "0.5651482", "0.5640664", "0.562222", "0.5601665", "0.5599323", "0.5589772", "0.5564222", "0.55534214", "0.54885685", "0.5481102", "0.54796255", "0.5443435" ]
0.83699447
0
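Continuing with the stand-in shards from the earlier sketch, check_tensor accepts a sharding that exactly tiles the tensor and rejects one that leaves a gap. The 4x4 and 5x4 dimensions below are illustrative values, not taken from the dataset.

    # Reusing `top` and `bottom` from the sketch above (each shard is 2 x 4).
    check_tensor([top, bottom], [4, 4])   # passes: shard volume 8 + 8 equals tensor volume 16
    check_tensor([top, bottom], [5, 4])   # raises ValueError: shards cover 16 of 20 elements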
Process newly submitted GeoPost entry... PROCEDURE 1) Get data from POST body 2) Validate form 3) Upload photo to bucket 4) Make WFS transaction with GeoServer
def post(self, request):
    # GET REQUEST DATA
    fid = request.POST.get('fid', False)
    uuid = request.POST.get('uuid', False)
    title_text = request.POST.get('title', False)
    body = request.POST.get('body', False)
    photo = request.FILES.get('photo', False)  # FOR STORAGE
    wfsxml = request.POST.get('wfsxml', False)  # FOR GEOSERVER
    data = {
        'uuid': uuid,
        'title_text': title_text,
        'body': body,
        'wfsxml': wfsxml
    }
    # VALIDATE FORM
    form = GeoPostForm(data, request.FILES)
    logger.info("\ninstantiate Geopost form\n")
    # IF FORM VALIDATION ERROR
    if not form.is_valid():
        return server_error(request.body)
        #context = self.getContext(form)
        #return render(request, 'geopost/entry.html', context)
    else:
        pass
    # GET CLEAN VALUES
    uuid = form.cleaned_data['uuid']
    wfsxml = form.cleaned_data['wfsxml']
    # UPLOAD PHOTO TO BUCKET
    # if editing existing entry, first delete existing photo
    if fid:
        delete_from_bucket(uuid, self.imageBucket)
    else:
        pass
    photo.open('rb')
    error = upload_to_bucket(
        photo, self.imageBucket, photo.content_type, uuid)
    photo.close()
    # IF ERROR UPLOADING IMAGE
    if error:
        return server_error(error)
    else:
        pass
    # MAKE GEOSERVER WFS TRANSACTION
    error = post_to_geoserver(wfsxml, self.wfsURL)
    # ALL GOOD
    if not error:
        return HttpResponseRedirect(reverse('geopost_home'))
    # IF WFS TRANSACTION ERROR
    else:
        delete_from_bucket(uuid, self.imageBucket)
        return server_error(error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_POST(self):\n global pages, devices, settings\n try:\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n if ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers.getheader('content-length'))\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n #if(self.path != '/simple/updateGPSCoordinates'):\n #print postvars\n #print self.path\n #now call the function that is meant to process this request\n if(self.path == '/simple/selectedHousehold'):\n #print 'need to get all cows in household #%s ' % postvars['household'][0]\n output = pages[postvars['page'][0]].selectedHousehold(postvars['household'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/selectedSite'):\n #print 'need to get all the households from the site #%s ' % postvars['sites'][0]\n output = pages[postvars['page'][0]].selectedSite(postvars['sites'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/nextAnimal'):\n #print 'we have finalized saving samples for one animal, now we need to go to the next animal'\n output = pages[postvars['page'][0]].nextAnimal(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/sampleCow'):\n #print 'we sampling the cow'\n #we have the cow that we want to sample...now proceed with the sampling\n output = pages[postvars['page'][0]].collectSample(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/saveSample'):\n #print 'we saving a new sample'\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].saveSample(postvars, devices['gps'], settings['barcode_use'])\n self.wfile.write(output)\n elif(self.path == '/simple/updateGPSCoordinates'):\n #we want to get the current GPS position\n output = pages[postvars['page'][0]].curPosition(devices['gps']) #for the sake of consistence, we just using the passed 'page' variable\n self.wfile.write(output)\n elif(self.path == '/simple/deleteSample'):\n #print 'we need to delete the sample %s ' % postvars['sample'][0]\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].deleteSample(postvars['sample'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/deleteAnimal'):\n #print postvars\n #print 'we need to delete the anial %s ' % postvars['curAnimalRead'][0]\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].deleteAnimal(postvars['curAnimalRead'][0], devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/showAllSites'):\n #print postvars\n #print 'we either to show all sites or just the households within a certain radius'\n #the user have entered a sample for an animal\n output = pages[postvars['page'][0]].showSites(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/refreshSampler'):\n #print 'I really dont know what to do here, so we shall evaluate it a case on case basis'\n output = pages[postvars['page'][0]].refreshSampler(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/simple/updateHouseholds'):\n #print 'The radius of interest has changed...lets update the households'\n output = pages[postvars['page'][0]].updateSites(postvars, devices['gps'])\n self.wfile.write(output)\n elif(self.path == '/admin'):\n #print 'admin page'\n \n if ctype == 'multipart/form-data':\n self.send_response(301)\n form = cgi.parse_multipart(self.rfile, pdict)\n #print form\n pages[form['page'][0]].parse_form(form, 
info, devices)\n self.send_header('Location', 'http://localhost:%s/%s' % (settings['port'], form['page'][0]))\n self.end_headers()\n except IOError:\n self.send_error(501, 'Unsupported Method')", "def post(self):\n data = request.form.to_dict() # data is a dict with multipart/form-data\n if(not data):\n return BAD(err4,msg19,400 )\n \n if dataKey not in data:\n return BAD(err4, msg4, 400)\n \n d = data[dataKey]\n Json = ReadJson(d)\n dictionary = Json.Decode()\n if(dictionary is False):\n return BAD(json_error,msg18,400)\n if(Json.Validate(dictionary)):\n img = SaveImage(ALLOWED_EXTENSIONS)\n media = request.files\n if(imageKey not in media and imageKey in data):\n return BAD(err1, msg3, 400)\n\n if(imageKey not in media):\n return BAD(err3, msg2, 400)\n\n \n img.Save(imageKey, folder)\n ImageId = img.name\n \n if(ImageId is not None):\n client = ManagePsb(credentials, databaseName)\n query = {\n \"latitude\": dictionary[\"latitude\"],\n \"longitude\": dictionary[\"longitude\"],\n }\n Projection = {\n \"status\": 1,\n \"_id\" : 0\n }\n cursor = client.Filter(\n collection, query=query, Projection=Projection)\n c = cursor.count()\n if(c == 0):\n json = Json.Decode()\n client.Save(json, collection, img.name)\n img.Upload()\n\n else:\n return BAD(msg5, warning, 409)\n else:\n return BAD(err1, msg, 400)\n\n return OK(msg1, 201)\n\n else:\n return BAD(msg6, Json.missing, 400)", "def process_form(request):\n raw_data = request.form\n data = raw_data.copy()\n data['resources'] = request.form.getlist('resources')\n if request.remote_addr == '127.0.0.1':\n data['ip'] = '100.7.27.72'\n else:\n data['ip'] = request.remote_addr\n data['user_agent'] = request.user_agent.string\n data['@timestamp'] = datetime.utcnow()\n latitude = float(data['latitude'])\n longitude = float(data['longitude'])\n data['location'] = [latitude, longitude]\n return data", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def submit_fishfry():\n logging.info(\"\\nsubmit ----------\")\n # pdb.set_trace()\n form = FishFryForm()\n # logging.info(json.dumps(request.form, indent=2))\n # ffid = form['ffid']\n if form.validate_on_submit():\n\n # ---------------------------------------------------------------------\n # get the form data and plug it into the geojson.\n # some of that data requires post-processing; that is done here.\n\n # feature_dict = postprocess_submit(request.form.to_dict())\n\n properties = {\n \"venue_name\": form.venue_name.data,\n \"venue_address\": form.venue_address.data,\n \"venue_type\": form.venue_type.data,\n \"venue_notes\": form.venue_notes.data,\n \"website\": form.website.data,\n \"email\": form.email.data,\n \"phone\": form.phone.data,\n \"etc\": form.etc.data,\n \"handicap\": postbool(form.handicap.data),\n \"alcohol\": postbool(form.alcohol.data),\n \"homemade_pierogies\": postbool(form.homemade_pierogies.data),\n \"lunch\": postbool(form.lunch.data),\n \"take_out\": postbool(form.take_out.data),\n \"validated\": form.validated.data,\n \"publish\": form.publish.data,\n \"menu\": {\n \"text\": form.menu_txt.data,\n \"url\": form.menu_url.data\n },\n \"events\": postprocess_events(form.events.data)\n }\n geometry = {\n \"type\": \"Point\",\n \"coordinates\": [form.lng.data, form.lat.data]\n }\n\n feature = {\n \"type\": \"Feature\",\n \"properties\": properties,\n \"geometry\": geometry\n }\n\n logging.info(json.dumps(feature, 
indent=2))\n\n # OPTOINAL: validate with Marshmallow here\n # (WTForms is also providing validation)\n # try:\n # result = Feature().load(feature)\n # except ValidationError as err:\n # logging.warning(err.messages)\n # logging.warning(err.data)\n\n # ---------------------------------------------------------------------\n # if there is an id already provided by the form, then this is an\n # existing record, and we're doing an update.\n ffid = form.ffid.data\n if ffid and ffid != \"None\":\n logging.info(\"This is an existing record ({0})\".format(ffid))\n onefry = update_one_fishfry(\n ffid,\n properties,\n geometry\n )\n logging.info(json.dumps(onefry, indent=2))\n\n flash('Fish Fry updated! ({0})'.format(ffid), \"info\")\n return redirect(url_for('load_fishfry', ffid=ffid))\n\n # ----------------------------------------------------------------------\n # Otherwise this is a new record. An FFID will be assigned\n # closer to the metal.\n else:\n logging.info(\"This is a new record\")\n\n # submit to the db\n onefry = make_one_fishfry(\n properties=properties,\n geometry=geometry\n )\n if 'id' in onefry.keys():\n ffid = onefry['id']\n # once the record create is submitted, reload this page with the data.\n flash('Fish Fry added! ({0})'.format(ffid), \"success\")\n return redirect(url_for('load_fishfry', ffid=ffid))\n else:\n flash(\n \"There was an 500-level error when adding data to the database.\", \"danger\")\n return render_template(\n 'pages/fishfryform.html',\n form=form,\n )\n # flash(\"Invalid data:\\n\"{0}.format(\"\\n\".join([error for error in form.errors])))\n # flash(\"You can only submit data through the form via POST request.<br>Consider using the API if you want to work with data programmatically.\", \"info\")\n # return redirect(url_for('load_fishfry', ffid=ffid))\n return render_template(\n 'pages/fishfryform.html',\n form=form\n )", "def parse_post(request):\n\n fp = StringIO(request.raw_body)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n post = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return post", "def delete(request):\n wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER\n uuid = request.POST.get('uuid', False)\n # MAKE GEOSERVER WFS TRANSACTION\n error = post_to_geoserver(wfsxml, GeoPostBase.wfsURL)\n # ALL GOOD\n if error:\n return server_error(error)\n # IF WFS TRANSACTION ERROR\n else:\n pass\n # Delete photo from bucket\n delete_from_bucket(uuid, GeoPostBase.imageBucket)\n return HttpResponseRedirect(reverse('geopost_home'))", "def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n 
logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}", "def submit(self):\n data = self.getFSNDataDict()\n if data != []:\n MOSES.addToPiggyBank(data, self.user_id, self.password)", "def post(self, request, *args, **kwargs):\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n gw_location_file = request.FILES.get('gw_location_file')\n gw_level_file = request.FILES.get('gw_level_file')\n\n if form.is_valid():\n if gw_location_file:\n gw_location_file.seek(0)\n if str(gw_location_file).split('.')[-1] == \"xls\":\n sheet = xls_get(gw_location_file, column_limit=4)\n elif str(gw_location_file).split('.')[-1] == \"xlsx\":\n sheet = xlsx_get(gw_location_file, column_limit=4)\n sheetname = next(iter(sheet))\n records = sheet[sheetname]\n for record in records:\n if record[0].lower() == 'id well':\n continue\n\n point = Point(x=record[3], y=record[2], srid=4326)\n well = GWWell.objects.create(\n gwwellname=record[0],\n gwwelllocation=point,\n gwwelltotallength=record[1]\n )\n\n if gw_level_file:\n gw_level_file.seek(0)\n if str(gw_level_file).split('.')[-1] == \"xls\":\n sheet = xls_get(gw_level_file, column_limit=4)\n elif str(gw_level_file).split('.')[-1] == \"xlsx\":\n sheet = xlsx_get(gw_level_file, column_limit=4)\n sheetname = next(iter(sheet))\n records = sheet[sheetname]\n for record in records:\n if record[0].lower == 'time':\n continue\n\n try:\n well = GWWell.objects.get(gwwellname=record[3])\n time = dateparse.parse_datetime(record[0])\n well_level_log = GWGeologyLog.objects.create(\n phenomenonTime=time,\n resultTime=time,\n gw_level=record[2],\n reference=record[1]\n )\n well.gwwellgeology.add(well_level_log)\n except GWWell.DoesNotExist:\n pass\n pass\n return self.form_valid(form)\n\n else:\n return self.form_invalid(form)", "def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def upload_finish(self, cloud_file):", "def do_POST(self):\n self._try_to_process_request(self._handle_post_request)", "def submit_plugin_form_data(self, form_entry, request, form,\n form_element_entries=None, **kwargs):", "def on_post(self, req, resp, account, container):\n _handle_script_upload(req, resp, account, container)", "def handle_new_post(post_data, user_agent, remote_addr):\n \n for required in POST_REQUIRED_PARAMS:\n if required not in post_data:\n return None, None\n\n try:\n value = int(string_from_interwebs(post_data.getfirst(\"code\", \"\")))\n except ValueError:\n return None, None\n \n if value != 98098098098:\n return None, None\n\n # not yet safe to use.\n location = post_data.getfirst(\"location\", \"\")\n tags = string_from_interwebs(post_data.getfirst(\"tags\")) \n author = post_data.getfirst(\"author\")\n \n split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(\",\")] # temporary\n \n if len(split_tags) > 3:\n return None, None\n \n author_id = string_from_interwebs(author).strip()\n \n with Connection('localhost', 27017) as connection:\n reply_to = string_from_interwebs(post_data.getfirst(\"reply_to\"))\n \n if not verify_author(author_id, connection):\n return None, None\n\n if not verify_post(reply_to, connection):\n return None, None\n\n # if reply then it's verified.\n # XXX: I need to make a standard object structure for this, so that I don't \n # have to update separate things.\n\n post = {\"viewed\" : 0,\n \"comments\" : 0,\n \"flagged\" : 0,\n \"disliked\" : 0,\n \"enjoyed\" : 0,\n \"num_replies\" : 0,\n \"num_reposts\" : 0,\n \"content-type\" : \"image\", # need to pull this from the mime lookup\n \"file\" : \"placeholder\",\n \"user_agent\" : user_agent,\n \"remote_addr\" : remote_addr,\n \"created\" : datetime.utcnow(),\n \"location\" : string_from_interwebs(location).strip(),\n \"author\" : ObjectId(author_id),\n \"reply_to\" : ObjectId(reply_to),\n \"tags\" : split_tags}\n\n update_post(reply_to, connection)\n\n return post_data.getfirst(\"data\"), post", "def post(self):", "def _postproc(self, request):\n if request.status_code 
!= 200: raise Exception('wrong error code: {0}'.format(request.status_code))\n data = request.json()\n self.data = self._finalize_data(data)", "def submission():\n\n # @ToDo: Something better than this crude check\n if not auth.s3_logged_in():\n auth.permission.fail()\n\n from io import StringIO\n import cgi\n from lxml import etree\n\n source = request.post_vars.get(\"xml_submission_file\", None)\n if isinstance(source, cgi.FieldStorage):\n if source.filename:\n xmlinput = source.file\n else:\n xmlinput = source.value\n\n if isinstance(xmlinput, str):\n xmlinput = StringIO(xmlinput)\n elif request.env.request_method == \"HEAD\":\n raise HTTP(204)\n else:\n raise HTTP(400, \"Invalid Request: Expected an XForm\")\n\n tree = etree.parse(xmlinput)\n tablename = tree.getroot().tag\n\n resource = s3db.resource(tablename)\n\n stylesheet = os.path.join(request.folder, \"static\", \"formats\", \"odk\",\n \"import.xsl\")\n\n try:\n result = resource.import_xml(source=tree, stylesheet=stylesheet)\n except (IOError, SyntaxError):\n raise HTTP(500, \"Internal server error\")\n\n # Parse response\n status = json.loads(result)[\"statuscode\"]\n\n if status == \"200\":\n r = HTTP(201, \"Saved\") # ODK Collect only accepts 201\n r.headers[\"Location\"] = request.env.http_host\n raise r\n else:\n raise HTTP(status, result)", "def postPoint(request, Form):\n\tform = Form(request.POST)\n\tform.data = form.data.copy()\n\n\t# Convert coords to valid geometry\n\ttry:\n\t\tform.data['geom'] = normalizeGeometry(form.data['geom'])\n\texcept(ValueError):\n\t\t# TODO provide error message to user here\n\t\tJsonResponse({'success': False})\n\t\t# messages.error(request, '<strong>' + _('Error') + '</strong><br>' + _('No point was selected for this type of report.'))\n\n\t# Validate and submit to db\n\tif form.is_valid():\n\t\tpoint = form.save()\n\t\t# Errors with push notifications should not affect reporting\n\t\tif not settings.DEBUG:\n\t\t\ttry: pushNotification.pushNotification(point)\n\t\t\texcept: pass\n\n\t\treturn JsonResponse({\n\t\t\t'success': True,\n\t\t\t'point': GeoJSONSerializer().serialize([point,]),\n\t\t\t'point_type': point.p_type,\n\t\t\t'form_html': render_crispy_form(Form())\n\t\t})\n\telse:\n\t\tlogger.debug(\"Form not valid\")\n\n\t# Else: error occurred\n\tform.data['geom'] = form.data['geom'].json\n\tform_html = render_crispy_form(form)\n\treturn JsonResponse({'success': False, 'form_html': form_html})", "def post():\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass" ]
[ "0.6062715", "0.59630567", "0.58975375", "0.58711004", "0.5809544", "0.5728217", "0.5689963", "0.55832034", "0.5559245", "0.5515752", "0.54731184", "0.54603994", "0.54473406", "0.5440826", "0.5434373", "0.54309976", "0.5366671", "0.53177875", "0.5264689", "0.5257757", "0.5244774", "0.5193859", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997", "0.5181997" ]
0.8015848
0
Download pdf of VanTechy presentation slideshow.
def vantechy(request):
    return FileResponse(open('/files/presentation.pdf', 'rb'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def download(filename):\n return send_from_directory(directory='pdf', filename=filename)", "def download(request, ef_id):\n ef = get_object_or_404(ExamFile, id=ef_id)\n path = os.path.join(settings.MEDIA_ROOT, ef.path.path)\n response= HttpResponse(content=file(path, 'rb').read(), \n mimetype='application/pdf')\n # fn = os.path.split(ef.path.path)[1]\n # response['Content-Disposition'] = \"attachment; filename=%s\" % (fn)\n return response", "def downlaod():\r\n filename = str(uuid.uuid4()) + '.pdf'\r\n filename = os.path.join('./output' , filename)\r\n\r\n config = pdfkit.configuration(wkhtmltopdf = PRG_Path)\r\n options = {\r\n 'page-size': 'Letter'\r\n ,'margin-top': '0.75in'\r\n ,'margin-right': '0.75in'\r\n ,'margin-bottom': '0.75in'\r\n ,'margin-left': '0.75in'\r\n ,'no-outline': None\r\n ,'encoding':'UTF-8'\r\n ,'enable-local-file-access':None\r\n ,'quiet': ''\r\n # ,'javascript-delay':2000000\r\n }\r\n\r\n\r\n html = create_html_report()\r\n pdf = pdfkit.from_string(input=html, output_path=filename,configuration=config, options=options)\r\n pdfDownload = open(filename,'rb').read()\r\n\r\n response: Response = Response (\r\n pdfDownload\r\n ,mimetype=\"application/pdf\"\r\n ,headers={\r\n \"Content-disposition\": \"attachment; filename=\" + filename\r\n ,\"Content-type\": \"application/force-download\"\r\n }\r\n )\r\n return response", "def download_presentation(epObject, uc):\r\n fileDict = make_file_dict()\r\n fileDict = populate_file_dict(epObject, uc, fileDict)\r\n now = str(datetime.datetime.now().hour) + \\\r\n str(datetime.datetime.now().minute) + \\\r\n str(datetime.datetime.now().second)\r\n directoryName = epObject.Name.replace(\" \", \"\") + \"_presentation_\" + now\r\n os.mkdir(directoryName)\r\n os.chdir(directoryName)\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(fileDict['pageUrls'][0]).read())\r\n temp.seek(0)\r\n update_page(temp, fileDict, \"index.html\", index=True)\r\n temp.close()\r\n os.mkdir(\"Pages\")\r\n os.chdir(\"Pages\")\r\n for (pageUrl, pageFileName) in zip(fileDict['pageUrls'][1:], \r\n fileDict['pageFileNames'][1:]):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(pageUrl).read())\r\n update_page(temp, fileDict, pageFileName)\r\n temp.close()\r\n os.chdir(\"../\")\r\n os.mkdir(\"Content\")\r\n os.chdir(\"Content\")\r\n for (fileUrl, fileId) in zip(fileDict['fileUrls'], fileDict['fileIds']):\r\n fileName = eportfolio.get_ep_object_properties(uc, fileId).\\\r\n FileName.strip()\r\n urllib.request.urlretrieve(fileUrl, fileName)\r\n os.chdir(\"../\")\r\n os.mkdir(\"Formatting\")\r\n os.chdir(\"Formatting\")\r\n for (cssUrl, cssFileName) in zip(fileDict['cssUrls'],\r\n fileDict['cssFileNames']):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(cssUrl).read())\r\n temp.seek(0)\r\n update_css_file(cssUrl, temp, cssFileName)\r\n temp.close()\r\n for imgUrl in fileDict['imgUrls']:\r\n fileName = imgUrl[imgUrl.rfind(\"/\"): ]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[: fileName.find(\"?\")]\r\n urllib.request.urlretrieve(imgUrl, fileName)\r\n os.chdir(\"../\")\r\n print(str(fileDict))\r\n return fileDict", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def download_page(url, 
destination):\n\n # Set and verify destination path\n destination = directory_resolve_home(directory_slash(destination))\n directory_exists(destination)\n\n # Set output name\n filename = generate_filename(url=url, title=get_page_title(read_page(url)))\n\n pdfkit.from_url(url, destination + filename)\n\n return destination + filename", "def download(texttitle):\n try:\n body = current_file.analysed_texts['Regular']\n rendered = render_template('pdf_template.html', title=texttitle, body=body)\n options = {'encoding': \"UTF-8\"}\n pdf = pdfkit.from_string(rendered, False, options=options)\n response = make_response(pdf)\n response.headers[\"Content-Type\"] = 'application/pdf'\n response.headers[\"Content-Disposition\"] = 'attachment; filename=output.pdf'\n\n return response\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(request.referrer)", "def download_pdfs_from_site(url: str, verbose=True):\n site_url = get_site_url(url)\n html = requests.get(url).text\n\n\n all_links = get_links(html)\n pdf_links = [link for link in all_links if link.endswith('pdf')]\n pdf_links = maybe_add_full_links(pdf_links, site_url)\n \n if verbose:\n print('Found the following pdf links')\n print(pdf_links)\n pdf_links = tqdm.tqdm(pdf_links)\n for link in pdf_links:\n download_from_link(link)", "def tutorial(request):\n try:\n file_path = (settings.BASE_DIR\n + '/website_files/metropolis_tutorial.pdf')\n with open(file_path, 'rb') as f:\n response = HttpResponse(f, content_type='application/pdf')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"how_to.pdf\"'\n return response\n except FileNotFoundError:\n # Should notify an admin that the file is missing.\n raise Http404()", "def download_pdf( url, filename = None ):\n r = urlopen( Request( url ) )\n try:\n if filename is None:\n filename = give_filename( url )\n with open( filename, 'wb' ) as f:\n shutil.copyfileobj( r, f )\n finally:\n r.close()", "def download_latex(self):\n try:\n # $ Set the Arxiv Object to ensure Proper extraction\n identity,paper = self.extract_meta_from_remote(self.paper_id)\n self.identity = identity\n\n if not dir_exists(self.paper_root_path):\n os.makedirs(self.paper_root_path)\n # $ Download the paper. 
\n downloaded_data = arxiv.download(paper,dirpath=self.paper_root_path,slugify=lambda paper: paper.get('id').split('/')[-1],prefer_source_tarfile=True)\n return downloaded_data\n except Exception as e:\n raise ArxivAPIException(self.paper_id,str(e))", "def download_pdf(pdf_url):\n response = requests.get(pdf_url, allow_redirects=True)\n open('./data/raw/full.pdf', 'wb').write(response.content)", "def response_pdf(self, filename):\n now = DateTime()\n nice_filename = '%s_%s' % (filename, now.strftime('%Y%m%d'))\n self.request.response.setHeader(\"Content-Type\", \"application/pdf\")\n self.request.response.setHeader(\"Content-Disposition\", \"attachment\")\n self.request.response.setHeader(\"filename\", nice_filename)\n self.request.response.setHeader('Last-Modified',\n DateTime.rfc822(DateTime()))\n self.request.response.setHeader(\"Cache-Control\", \"no-store\")\n self.request.response.setHeader(\"Pragma\", \"no-cache\")\n return open(filename, 'rb').read()", "def download(filename):\n path = os.path.join(\n current_app.root_path, current_app.config['UPLOAD_FOLDER'], filename)\n path_default = current_app.config[\"PDF_TEMPLATE_PATH\"]\n\n def generate():\n try:\n with open(path, \"rb\") as f:\n yield from f\n os.remove(path)\n except FileNotFoundError:\n with open(path_default, \"rb\") as f:\n yield from f\n\n r = current_app.response_class(generate(), mimetype='application/pdf')\n r.headers.set(\n 'Content-Disposition', 'attachment', filename=PDF_OUT_FILENAME\n )\n return r", "def download_pdf_file(download_url):\n web_file = urllib.urlopen(download_url)\n filename = \"/tmp/\" + str(uuid.uuid4()) + \".pdf\"\n local_file = open(filename, 'w')\n local_file.write(web_file.read())\n web_file.close()\n local_file.close()\n return filename", "def download_pdfs():\n try:\n # create the download folder if it does not exist already\n Path(paho_raw_reports_dir).mkdir(parents=True, exist_ok=True)\n # remove all current pdfs in the download folder\n filelist = [ f for f in os.listdir(paho_raw_reports_dir) if f.endswith(\".pdf\") ]\n for f in filelist:\n os.remove(os.path.join(paho_raw_reports_dir, f))\n # open the browser\n logging.info(\"Now opening the Firefox browser\")\n options = Options()\n options.headless = True\n options.accept_insecure_certs = True\n profile = FirefoxProfile()\n profile.set_preference('security.tls.version.enable-deprecated', True)\n # set the download location of the pdfs and remove the download prompt\n profile.set_preference(\"browser.altClickSave\", True)\n profile.set_preference(\"browser.download.folderList\", 2)\n profile.set_preference(\"browser.download.panel.shown\", False)\n profile.set_preference(\"browser.download.manager.showWhenStarting\", False)\n profile.set_preference(\"browser.download.dir\", paho_raw_reports_dir)\n profile.set_preference(\"browser.download.useDownloadDir\", True)\n profile.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \n \"application/pdf,application/x-pdf,application/octet-stream,application/x-winzip,application/x-gzip\")\n profile.set_preference(\"browser.download.manager.alertOnEXEOpen\", False)\n profile.set_preference(\"browser.download.manager.showWhenStarting\", False);\n profile.set_preference(\"browser.download.manager.focusWhenStarting\", False);\n profile.set_preference(\"browser.helperApps.alwaysAsk.force\", False);\n profile.set_preference(\"browser.download.manager.alertOnEXEOpen\", False);\n profile.set_preference(\"browser.download.manager.closeWhenDone\", True);\n 
profile.set_preference(\"browser.download.manager.showAlertOnComplete\", False);\n profile.set_preference(\"browser.download.manager.useWindow\", False);\n profile.set_preference(\"services.sync.prefs.sync.browser.download.manager.showWhenStarting\", False);\n profile.set_preference(\"pdfjs.disabled\", True)\n driver = webdriver.Firefox(profile, options=options)\n # Go the PAHO website that holds the reports\n reports_present_on_page = True\n page_number = 0\n pahoreporturl = \"https://www.paho.org/en/technical-reports?topic=4922&d%5Bmin%5D=&d%5Bmax%5D=&page=\"+str(page_number)\n while reports_present_on_page:\n logging.info(\"Navigating to \"+pahoreporturl)\n driver.get(pahoreporturl)\n # get all urls containing certain keywords on this page\n report_links_elements = driver.find_elements_by_partial_link_text(\"COVID-19 cases\")\n # store all of the urls in each element\n report_links = []\n for report_link_element in report_links_elements:\n report_links.append(report_link_element.get_attribute('href'))\n # now go through each url in the list\n for report_link in report_links:\n # navigate to each url\n driver.get(report_link)\n # once the page has loaded, click the download link\n download_link = driver.find_element_by_link_text(\"DOWNLOAD\")\n download_link.click()\n logging.info(\"File downloaded from: \"+download_link.get_attribute('href'))\n # check if we have any elements that we're interested in on this page, to control the loop\n if report_links_elements:\n reports_present_on_page = True\n page_number += 1\n pahoreporturl = \"https://www.paho.org/en/technical-reports?topic=4922&d%5Bmin%5D=&d%5Bmax%5D=&page=\"+str(page_number)\n else:\n reports_present_on_page = False\n logging.info(\"No more reports on page. Breaking loop.\")\n return 0\n except:\n logging.info(\"Encountered an issue while trying to download the pdfs.\")\n raise\n finally:\n if 'driver' in locals() and driver is not None:\n # Always close the browser\n driver.quit()\n logging.info(\"Successfully closed web browser.\")\n logging.info(\"Completed downloading of all COVID19 pdfs from PAHO website.\")", "def scrape_pdfs(db):\n process = CrawlerProcess()\n process.crawl(PdfSpider, db=db)\n process.start()", "def click_ver_pdf(self):\n self.button.click(liquidaciones_historicas_catalog.VINCULO_VER_PDF)", "def pdf(self):\n\n for attachment in self.find('guidle:offerDetail//guidle:attachment'):\n url = self.get('guidle:url', root=attachment)\n\n if not url.endswith('.pdf'):\n return None, None\n\n name = self.get('guidle:description', root=attachment)\n name = name.strip().split('\\n')[0]\n\n return url, f'{name}.pdf'\n\n return None, None", "def download_resume(self, links):\n\t\tbot = self.bot\n\n\t\tfor link in links:\n\t\t\tbot.get(link)\n\t\t\ttime.sleep(5)\n\t\t\tmore = bot.find_element_by_class_name(\"pv-s-profile-actions__overflow-toggle.artdeco-button\").click()\n\t\t\ttime.sleep(2)\n\t\t\tsave_pdf = bot.find_element_by_class_name(\"pv-s-profile-actions--save-to-pdf\").click()\n\t\t\ttime.sleep(5)", "def _on_articles_reveal_pdf(self, evt=None, path=None):\n \n # get path from selection\n if not path:\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # get PDF path\n path = articles[0].pdf_path\n \n # check path\n if not path or not os.path.exists(path):\n wx.Bell()\n dlg = mwx.MessageDlg(self, -1, \"PDF file is not available.\", path)\n dlg.ShowModal()\n dlg.Destroy()\n return\n \n # try to reveal PDF\n try:\n if wx.Platform == '__WXMAC__':\n 
subprocess.Popen([\"open\", \"-R\", path])\n elif wx.Platform == '__WXMSW__':\n subprocess.Popen('explorer /select, \"%s\"' % path)\n else:\n pass\n except:\n pass", "def download_from_website(DATA_PDFS_DIR, name, url):\n # Create a folder for the PDFs if it does not exist\n if not os.path.exists(os.path.join(DATA_PDFS_DIR, name)):\n os.makedirs(os.path.join(DATA_PDFS_DIR, name))\n \n # Setup the download parameters\n start_time = time.perf_counter()\n print('Downloading from \"{}\"'.format(url))\n log_name = os.path.join(DATA_PDFS_DIR, 'log_{}.txt'.format(name))\n \n # Download using the wget command (UNIX only)\n save_to = os.path.join(DATA_PDFS_DIR, name)\n subprocess.run([\"wget\", \n url, \n \"--directory-prefix={}\".format(save_to),\n \"-nd\", \n \"--accept=pdf\", \n \"-r\", \n \"-t 3\", \n \"-e robots=off\", \n \"-nc\",\n \"-nv\",\n \"--append-output={}\".format(log_name)])\n \n # Print information about download times\n print('Downloading from \"{}\" DONE'.format(url))\n dl_time = round(time.perf_counter() - start_time, 1)\n print(' -> Downloading took {} seconds'.format(dl_time))", "def download():\n now_dt = dt.datetime.now()\n return render_template(\n 'resume/home.html',\n age=relativedelta(now_dt, dt.datetime(day=19, month=3, year=1983)).years,\n current_year=now_dt.year,\n )", "def download_file(session_requests, file_url, job_num, file_num, ext):\n \n filename = \"job_\" + str(job_num) + \"_file_\" + str(file_num) + ext\n pathname = Path(OUTPUT_PDF_PATH + filename) \n response = session_requests.get(file_url)\n pathname.write_bytes(response.content)\n \n return filename", "def regular_download(self) -> NoReturn:\n\n if not path.isdir(self.name):\n mkdir(self.name)\n\n for chapter in self.chapters.keys():\n\n chapter_folder = f\"{self.name}/{chapter}/\"\n curr_chapter = self.chapters[chapter]\n base_url = f\"{curr_chapter['server']}{curr_chapter['hash']}/\"\n\n if not path.isdir(chapter_folder):\n mkdir(chapter_folder)\n\n for image in curr_chapter[\"images\"]:\n\n image_url = f\"{base_url}{image}\"\n image_file = f\"{chapter_folder}{image}\"\n response = requests.get(image_url, headers={\"Connection\":\"close\"})\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n else:\n print(f\"Error downloading chapter: {curr_chapter['num']} Image: {image}\")", "def downloadPdfs(soup, full_path, pattern, subdir):\n # Create subdir, exams or solutions, if not already exists\n path_to_pdfs = os.path.join(full_path, subdir)\n if not os.path.exists(path_to_pdfs):\n os.makedirs(path_to_pdfs)\n\n # Download all the pdfz!\n for x in soup.find_all('a', text=re.compile(pattern)):\n url_to_exam = x['href']\n if url_to_exam.endswith('.pdf'):\n print download_file(url_to_exam, path_to_pdfs), ' downloaded'", "def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. 
\n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def download_participants_document(cupASSistName):\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()), urllib2.HTTPRedirectHandler())\n opener.open(\"http://www.cupassist.com/pamelding/redirect.php?tknavn=\" + cupASSistName)\n return opener.open(\"http://www.cupassist.com/pamelding/vis_paamelding.php\").read()", "def download_pdf(url):\n # Extracts the last part of the URL to be used as the name of the file\n local_filename = url.split('/')[-1].replace('%','')\n \n if local_filename not in REPORTS:\n with urllib.request.urlopen(url) as r:\n with open(f'reports/{local_filename}', 'wb') as f:\n f.write(r.read())\n \n # updates report files in the directory\n return f'reports/{local_filename}'\n else:\n print(f'Already in the database - {local_filename}')\n return False" ]
[ "0.63570714", "0.6327665", "0.6183838", "0.60757995", "0.6060038", "0.60584295", "0.6054394", "0.59414095", "0.586487", "0.5839018", "0.5790814", "0.57728595", "0.57418793", "0.57275844", "0.57084584", "0.56971765", "0.56889516", "0.5683005", "0.56304324", "0.5624967", "0.5597374", "0.5512267", "0.54995424", "0.549733", "0.54856753", "0.5470825", "0.54656774", "0.5442143", "0.5431172", "0.5430179" ]
0.658064
0
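Each row of this dump carries, beside the positive document, a list of negative documents, their scores, and the positive's own document_score and document_rank. One common way to use those fields is to drop negatives that score too close to the positive; the sketch below is one such margin filter, where the `filter_negatives` helper, the margin value, and the toy row are illustrative assumptions rather than anything taken from the dataset itself.

```python
# A minimal sketch of a margin filter over the per-row scores: keep only
# negatives whose score sits at least `margin` below the row's
# document_score. The helper name, the margin default, and the toy row
# are assumptions for illustration.
def filter_negatives(row: dict, margin: float = 0.05) -> list:
    threshold = float(row["document_score"]) - margin
    return [
        neg
        for neg, score in zip(row["negatives"], row["negative_scores"])
        if float(score) <= threshold
    ]

toy_row = {
    "negatives": ["negative a", "negative b", "negative c"],
    "negative_scores": ["0.63", "0.60", "0.55"],
    "document_score": "0.658064",
}
print(filter_negatives(toy_row))  # keeps "negative b" and "negative c"
```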
List all available charts
def list_charts(): charts_root = Path(R".\charm\data\charts") charts = list(charts_root.rglob("*.chart")) return charts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_charts(self, app):\n return self._list(self._path() + '?app_name=' + app, 'charts')", "def charts(self):\n return self.container['charts']", "def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts", "def charts(self):\n return self._charts", "def charts(self):\n return self.properties.get('charts',\n EntityCollection(self.context, WorkbookChart,\n ResourcePath(\"charts\", self.resource_path)))", "def charts(self, charts):\n\n self.container['charts'] = charts", "def getCharts(self):\n \n # code from Jerry to strip off irrelevant headings\n results = []\n flag = False\n for line in self.ResultsForCSVfile:\n if flag:\n results.append(line)\n if len(line) == 0:\n flag = True\n # create charts\n charts = {}\n for (eachFrameSize,eachILOAD) in map(None,self.FrameSizeList,self.ILOADlist):\n c = self.CreateRateVsRangeGraph( eachFrameSize, eachILOAD, results )\n t = c.title\n charts[t] = c\n return charts", "def list():\n cmd = 'qri list --format json'\n result, err = shell_exec(cmd)\n if err:\n raise RuntimeError(err)\n datasets = dataset.DatasetList([dataset.Dataset(d) for d in json.loads(result)])\n datasets.sort(key=lambda d: d.human_ref())\n return datasets", "def get_charts(self, period=\"d\", size=\"l\", chart_type=\"c\", ta=\"1\"):\n\n encoded_payload = urlencode(\n {\"ty\": chart_type, \"ta\": ta, \"p\": period, \"s\": size}\n )\n\n sequential_data_scrape(\n scrape.download_chart_image,\n [\n f\"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}\"\n for row in self.data\n ],\n self._user_agent,\n )", "def show_available_datasets(params: DownloadCommandParameters):\n print(f\"\\nDatasets available in '{params.metadata_file}':\\n\")\n datasets = pd.read_csv(params.metadata_file)[\"dataset\"]\n items = datasets.value_counts()\n print(pd.DataFrame({\"Datasets\": items.index,\n \"Instances\": items.values}))", "def my_charts(page_num=1):\n # Download charts that belong to the current user\n charts = Chart.query.filter_by(owner_id=current_user.id).paginate(page_num)\n return render_template('reports/my_charts.html', charts=charts)", "def chart_finder(self, keyword):\n\n data, _ = self.helm_client.search(keyword)\n return data", "def charts(self, charts):\n\n self._charts = charts", "def available_plots(self):\n return self.visualizer.available_plots()", "def get_weekly_chart_list(self) -> ListModel[Chart]:\n return self.retrieve(\n bind=Chart,\n flatten=\"chart\",\n params=dict(method=\"user.getWeeklyChartList\", user=self.name),\n )", "def test_read_charts(self, chart, charts):\n self.chart = charts\n chart_objects = chart.objects.all()\n if not chart_objects:\n raise AssertionError(\"Could not read charts.\")", "def list_datasets():\n return METADATA.keys()", "def charts(self,\n time_period='day',\n chart_genre='all',\n per_page=None,\n page=None,\n text_format=None,\n type_='songs'):\n endpoint = type_ + '/chart'\n params = {'time_period': time_period,\n 'chart_genre': chart_genre,\n 'per_page': per_page,\n 'page': page,\n 'text_format': text_format or self.response_format}\n return self._make_request(path=endpoint, params_=params, public_api=True)", "async def allseries(self, ctx):\n\n await self.all_series_db.call(ctx)", "def my_charts(request):\n\n logger.debug('called')\n\n context = {}\n\n simulations = request.user.simulations.all().exclude(\n name__icontains=settings.STANDARD_CHART_NAME\n ).select_related(\n 'fight_style',\n 'result',\n 'simulation_type',\n 'wow_class',\n 
'wow_spec',\n 'queue',\n )\n\n context['charts'] = simulations\n\n return render(request, 'general_website/my_charts.html', context=context)", "def get_available_datasets():\n files = [file for file in glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets", "def allAxes( mv ):\n if mv is None: return None\n return mv.getAxisList()", "def list_all_datasets(client=None):\n datasets = []\n try:\n datasets_list = list(client.list_datasets())\n if datasets_list:\n for dataset in datasets_list:\n datasets.append(dataset.dataset_id)\n except Exception as error:\n print(\n \"Exception occurred at function {}: {}\".format(\"list_all_datasets\", error)\n )\n finally:\n return datasets", "def allGraphs(date):\n g = getGraph()\n for uri, label, filename in subgraphs(date):\n if not label:\n label = \"(no label provided)\"\n g.parse(filename, format=SUBGRAPH_FORMAT)\n return g", "def available_datasets(self) -> List[str]:\n return sorted(self.__by_name.keys())", "def getDependenciesCharts(self) -> Mapping[str, 'ChartVersionInfo']:\n deps = self.getDependenciesList()\n ret: Dict[str, 'ChartVersionInfo'] = {}\n for dep in deps:\n ret[dep['name']] = self.getDependencyChart(dep['name'])\n return ret", "def get(self):\n graph_plugins = manager.GraphManager.get_graphs()\n graphs = []\n for name, graph_class in graph_plugins:\n graph_plugin = {\n \"name\": name,\n \"display_name\": graph_class.DISPLAY_NAME,\n \"description\": graph_class.DESCRIPTION,\n }\n graphs.append(graph_plugin)\n\n return jsonify(graphs)", "def index():\n return render_template(\"charts.html\")", "def charts(self, time_span=None):\n assert time_span in (None, '6 hrs', '12 hrs', '24 hrs'), time_span\n selector = '#chart option'\n for element in self.doc.cssselect(selector):\n label = element.text.strip()\n chart_id = element.attrib['value']\n hidden_input = self.doc.get_element_by_id('ae-dash-graph-' +\n chart_id)\n url = hidden_input.attrib['value']\n if not url:\n continue\n if time_span is None:\n yield label, url\n elif label.endswith(' (%s)' % time_span):\n yield label.replace(' (%s)' % time_span, ''), url", "def list_datasets(project=None):\n bigquery_client = bigquery.Client(project=project)\n\n for dataset in bigquery_client.list_datasets():\n print(dataset.name)" ]
[ "0.7755798", "0.7449958", "0.71319413", "0.7110359", "0.66801405", "0.6495061", "0.63652325", "0.6342188", "0.6272101", "0.62555903", "0.6250946", "0.6006704", "0.60066587", "0.6001703", "0.597355", "0.59719837", "0.58516073", "0.5850192", "0.5838608", "0.5827216", "0.5812616", "0.5787245", "0.5769391", "0.5727296", "0.56889516", "0.5683649", "0.56771404", "0.5655422", "0.56289655", "0.56041414" ]
0.8125694
0
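The metadata attached to each row declares a single triplet objective over (query, document, negatives), as in the record above for "List all available charts". A minimal sketch of unpacking such a row into (anchor, positive, negative) training triplets follows; the `iter_triplets` helper and the abridged row literal are illustrative assumptions, not part of the dataset.

```python
# A minimal sketch, assuming each row has been parsed into a Python dict
# with the query / document / negatives fields shown above. One triplet is
# yielded per negative, matching the "triplet": [["query", "document",
# "negatives"]] objective stored in the row metadata. The abridged row
# literal below is illustrative only.
from typing import Dict, Iterator, List, Tuple

def iter_triplets(row: Dict) -> Iterator[Tuple[str, str, str]]:
    query: str = row["query"]
    positive: str = row["document"]
    negatives: List[str] = row["negatives"]
    for negative in negatives:
        yield query, positive, negative

row = {
    "query": "List all available charts",
    "document": 'def list_charts():\n    charts_root = Path(R".\\charm\\data\\charts")\n    ...',
    "negatives": ["def charts(self):\n    return self._charts", "def list_datasets():\n    ..."],
}

for anchor, positive, negative in iter_triplets(row):
    print(len(anchor), len(positive), len(negative))
```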
r""" Convert a chart Path object to a string path relative to .\charm\data\charts
def strch(chart): charts_root = Path(R".\charm\data\charts") return str(chart.relative_to(charts_root))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def data_path(path: str, createdir: bool = False) -> str:\n path_obj = Path(path)\n if not path_obj.is_absolute():\n if inside_project():\n path_obj = Path(project_data_dir(), path)\n else:\n path_obj = Path(\".scrapy\", path)\n if createdir and not path_obj.exists():\n path_obj.mkdir(parents=True)\n return str(path_obj)", "def completePath(path):\n return os.getcwd() + convertString(path)", "def _path_to_string(path):\n return '.'.join(path)", "def get_realpath(cls, path_str):\n if path_str.startswith('/'):\n return path_str\n return os.path.abspath(os.path.join(cls.apollo_root, path_str))", "def __str__(self):\n return str(self.path.relative_to(os.getcwd()))", "def get_relative_path(self):\n if self.dip or self.sip or self.replica:\n raise PackageError(\n \"Get relative path for sip or replica packages not yet implemented\"\n )\n if self.deleted:\n raise PackageError(\"There are no relative paths for deleted packages\")\n if self.uuid is None:\n raise PackageError(\"Cannot generate a relative path without a package UUID\")\n rel = \"\"\n left_offset = len(self.default_pair_tree)\n right_offset = -len(self.compressed_ext)\n try:\n if self.current_path.endswith(self.compressed_ext):\n rel = self.current_path[left_offset:right_offset]\n else:\n rel = self.current_path[left_offset:]\n except AttributeError:\n raise PackageError(\"Current path doesn't exist for the package\")\n return \"{}/data/METS.{}.xml\".format(rel, self.uuid)", "def graph_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"graph\")", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def abspath(self, path):\n return DataSource.abspath(self, self._fullpath(path))", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n return os.path.join(*self.GetLabelComponents(label))", "def to_file_path(self, resourcePath: str) -> PurePath:\n rel = resourcePath.replace('res://', '')\n return self._root.joinpath(rel)", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n path = \"\"\n components = self.GetLabelComponents(label)\n if not components:\n return path\n \n for c in components[:-1]:\n path = os.path.join(path, c + self.suite_extension)\n path = os.path.join(path, components[-1])\n return path", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result", "def path_to_string(path: Path) -> str:\n 
assert_continuous(path)\n\n pieces = [\"M {} {}\".format(path[0].p0[0], path[0].p0[1])]\n for curve in iter(path): # iter cast not strictly necessary\n piece = \"C {} {} {} {} {} {}\".format(\n int(round(curve.c0[0])), int(round(curve.c0[1])),\n int(round(curve.c1[0])), int(round(curve.c1[1])),\n int(round(curve.p1[0])), int(round(curve.p1[1]))\n )\n pieces.append(piece)\n\n return \" \".join(pieces)", "def path(self, name):\n raise NotImplementedError(\"This backend doesn't support absolute paths.\")", "def path(self, name):\n raise NotImplementedError(\"This backend doesn't support absolute paths.\")", "def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path", "def get_relative_regression_path(cls) -> str:\n # Get the fully-qualified name of the subject (in dotted form)\n fully_qualified_name: str = cls.subject_type().__module__ + '.' + cls.subject_type().__qualname__\n\n # Replace the dots with platform-dependent slashes\n return fully_qualified_name.replace(\".\", os.sep)", "def dataPath(self):\n return ''", "def dag_file_path(self, string):\n if not self.has_dag_field(string):\n return None\n # TODO handle url\n root_dir = self.root_dir()\n if root_dir:\n path = os.path.join(root_dir, self.dag_field(string))\n return os.path.realpath(path)\n return os.path.realpath(self.dag_field(string))", "def _get_as_path(self):\n return self.__as_path", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def absolute_physical_path(self) -> str:\n return self._path", "def ruta_archivo(path):\n return os.path.abspath(path)" ]
[ "0.6196704", "0.5926172", "0.5841862", "0.56423295", "0.56181204", "0.5596763", "0.5596409", "0.5583181", "0.5562562", "0.55594707", "0.552106", "0.55162907", "0.55162907", "0.54929805", "0.54887694", "0.54884666", "0.54857177", "0.5473863", "0.5469934", "0.54631054", "0.5450903", "0.5450903", "0.5448774", "0.5429692", "0.54215", "0.54077876", "0.54003716", "0.5392164", "0.5383128", "0.53688675" ]
0.7617498
0
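The positive documents of the two records above both operate on the same charm charts folder: `list_charts()` collects every *.chart file under .\charm\data\charts and `strch()` renders one of those paths relative to that root. The sketch below combines them into one runnable script; hoisting the root into a module constant and guarding on the folder's existence are small additions of mine, and the directory layout itself is taken on faith from the docstrings rather than verified.

```python
# A self-contained sketch combining the two retrieved chart helpers.
# The charts root is hoisted into a module constant and an existence
# guard is added; the .\charm\data\charts layout is assumed, not verified.
from pathlib import Path

CHARTS_ROOT = Path(R".\charm\data\charts")

def list_charts() -> list:
    # Recursively gather every *.chart file under the charts root.
    return list(CHARTS_ROOT.rglob("*.chart"))

def strch(chart: Path) -> str:
    # Render a chart path as a string relative to the charts root.
    return str(chart.relative_to(CHARTS_ROOT))

if __name__ == "__main__":
    if CHARTS_ROOT.is_dir():
        for chart in list_charts():
            print(strch(chart))
    else:
        print(f"No chart folder found at {CHARTS_ROOT}")
```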
Set the map grid cell as obstacle
def set_obstacle(self, pos: tuple): if self.within_map(pos): self.map[round(pos[0]), round(pos[1])] = OBSTACLE return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1", "def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def change_cell(self, event):\n try:\n (x, y) = self.get_id_from_coor(event.x, event.y)\n if self._board[x][y]:\n self._board[x][y] = False\n else:\n self._board[x][y] = True\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)\n except KeyError:\n pass # tkinter bug", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()", "def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def __init__(self, grid_height, grid_width, obstacle_list = None, \r\n zombie_list = None, human_list = None):\r\n poc_grid.Grid.__init__(self, grid_height, grid_width)\r\n if obstacle_list != None:\r\n for cell in obstacle_list:\r\n self.set_full(cell[0], cell[1])\r\n if zombie_list != None:\r\n self._zombie_list = list(zombie_list)\r\n else:\r\n self._zombie_list = []\r\n if human_list != None:\r\n 
self._human_list = list(human_list) \r\n else:\r\n self._human_list = []", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def __init__(self, grid_height, grid_width, obstacle_list = None,\n zombie_list = None, human_list = None):\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []", "def __init__(self, grid_height, grid_width, obstacle_list = None, \n zombie_list = None, human_list = None):\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def in_cell(self):\n for player in self.players:\n for cell in self.cell_lst:\n if player.x in cell[0] and player.y in cell[1]:\n player.current_cell = cell\n break" ]
[ "0.6904903", "0.6904903", "0.6622904", "0.6531364", "0.6457762", "0.6457762", "0.6451341", "0.6433322", "0.6425923", "0.6391529", "0.637739", "0.63555276", "0.63377327", "0.6329692", "0.62948984", "0.6285746", "0.62847567", "0.625142", "0.62377983", "0.62250537", "0.6216072", "0.6204581", "0.6191628", "0.6181617", "0.61785793", "0.61785793", "0.61564744", "0.61298907", "0.610743", "0.610672" ]
0.7163415
0
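The `set_obstacle` method in the record above references a `within_map` helper, a `map` array, and an `OBSTACLE` constant that the record does not include. The sketch below fills those in with plausible assumptions (a numpy-backed occupancy grid with 0 for free and 1 for obstacle) purely to show the method in context; none of it is taken from the original codebase.

```python
# A minimal occupancy-grid sketch giving the retrieved set_obstacle method
# a home. The OBSTACLE value, the numpy-backed map, and the within_map
# helper are assumptions filled in for illustration; the record itself
# only shows the method body.
import numpy as np

FREE, OBSTACLE = 0, 1

class GridMap:
    def __init__(self, width: int, height: int):
        self.map = np.full((width, height), FREE, dtype=np.int8)

    def within_map(self, pos: tuple) -> bool:
        x, y = round(pos[0]), round(pos[1])
        return 0 <= x < self.map.shape[0] and 0 <= y < self.map.shape[1]

    def set_obstacle(self, pos: tuple) -> bool:
        # Same logic as the retrieved document: mark the rounded cell.
        if self.within_map(pos):
            self.map[round(pos[0]), round(pos[1])] = OBSTACLE
            return True
        return False

grid = GridMap(10, 10)
assert grid.set_obstacle((3.4, 7.6))       # lands on cell (3, 8)
assert not grid.set_obstacle((12.0, 1.0))  # outside the map, ignored
```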
This is the main script for the bigmacc process. It iterates through various CEA and bigmacc operations for each key (e.g. 01011101). It ends by saving a sample of the hourly results across the key for each building in a netcdf and then wiping the project files to reset them for the next iteration.
def run(config): locator = cea.inputlocator.InputLocator(config.scenario) print('Key in run') print(config.bigmacc.key) i = config.bigmacc.key print(i) # SCENARIO SETUP --- config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i) print(config.general.project) cea.datamanagement.data_initializer.main(config) # use the scenario code to set the year for the lca and other operations that need the current year pathway_code = config.general.parent pathway_items = pathway_code.split('_') scenario_year = int(pathway_items[1]) config.emissions.year_to_calculate = scenario_year bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round) scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') experiment_key = 'exp_{}'.format(i) print(experiment_key) keys = [int(x) for x in str(i)] if experiment_key in scen_check['Experiments'].values.tolist(): print('Experiment was finished previously, moving to next.') pass else: print('START: experiment {}.'.format(i)) # INITIALIZE TIMER --- t0 = time.perf_counter() if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)): print(' - Folder exists for experiment {}.'.format(i)) else: os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i)) print(' - Folder does not exist for experiment {}, creating now.'.format(i)) # run the archetype mapper to leverage the newly loaded typology file and set parameters print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i)) cea.datamanagement.archetypes_mapper.main(config) # run the rule checker to set the scenario parameters print(' - Running rule checker for experiment {}.'.format(i)) cea.bigmacc.bigmacc_rules.main(config) # SIMULATIONS --- print(' - Run radiation is {}.'.format(config.bigmacc.runrad)) print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data)) # checking on need for radiation simulation if config.bigmacc.runrad == True: # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation if config.bigmacc.rerun != True: print(' - Running radiation simulation for experiment {}.'.format(i)) if os.path.exists(locator.get_radiation_building('B000')): print(' - Radiation folder exists for experiment {}, copying.'.format(i)) else: print(' - Radiation running for experiment {}.'.format(i)) cea.resources.radiation_daysim.radiation_main.main(config) else: # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i)) old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'solar-radiation') # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder()) else: radfiles = config.bigmacc.copyrad # print(' - Copying radiation results from {}.'.format(radfiles)) # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder()) print(' - Experiment {} does not require new radiation simulation.'.format(i)) # running demand forecasting if os.path.exists(locator.get_schedule_model_file('B000')): print(' - Schedules exist for experiment {}.'.format(i)) else: print(' - Schedule maker running for experiment {}.'.format(i)) schedule_maker.main(config) # check to see if we need to rerun demand or if we can copy if config.bigmacc.rerun != True: print(' - Running demand simulation for experiment {}.'.format(i)) 
cea.demand.demand_main.main(config) else: if keys[0] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) elif keys[6] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) else: cea.demand.demand_main.main(config) # print(' - Looking for demand results data from previous run for experiment {}.'.format(i)) # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i, # config.general.scenario_name, 'outputs', 'data', 'demand') # if os.path.exists(old_demand_files): # # print(' - Copy demand results files from previous run of experiment {}.'.format(i)) # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder()) # pass # else: # print(' - No results found.') # print(' - Running demand simulation for experiment {}.'.format(i)) # cea.demand.demand_main.main(config) if config.bigmacc.pv == True: print(' - Run PV is {}.'.format(config.bigmacc.pv)) if config.bigmacc.rerun == True: print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i)) old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar') if os.path.exists(old_pv_files): # print(' - Copying PV files from previous run of experiment {}.'.format(i)) # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder()) pass else: print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files)) print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) else: # if PV simulation is needed, run it. print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) print('Run water-body exchange is {}.'.format(config.bigmacc.water)) # if water-body simulation is needed, run it. if config.bigmacc.water == True: print(' - Running water body simulation for experiment {}.'.format(i)) water.main(config) # recalculating the supply split between grid and ng in the websrook DH if keys[4] == 1: print(' - Do not run district heat recalculation.') else: print(' - Run district heat recalculation.') cea.bigmacc.wesbrook_DH.main(config) if keys[7] == 1: print(' - PV use detected. 
Adding PV generation to demand files.') util.write_pv_to_demand(config) else: print(' - No PV use detected.') # running the emissions and costing calculations print(' - Run cost and emissions scripts.') cea.analysis.costs.system_costs.main(config) cea.analysis.lca.main.main(config) # clone out the simulation inputs and outputs directory print(' - Transferring results directory for experiment {}.'.format(i)) new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'inputs') new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data') if config.bigmacc.rerun != True: distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path) distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path) time_elapsed = time.perf_counter() - t0 # save log information log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i), 'Completed': 'True', 'Experiment Time': '%d.2 seconds' % time_elapsed, 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True) log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv')) log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv", ) # write netcdf of hourly_results netcdf_writer.main(config, time='hourly') if config.bigmacc.rerun != True: shutil.rmtree(locator.get_costs_folder()) shutil.rmtree(locator.get_demand_results_folder()) shutil.rmtree(locator.get_lca_emissions_results_folder()) shutil.rmtree(locator.get_solar_radiation_folder()) shutil.rmtree(locator.get_potentials_folder()) else: print(' - Rerun does not require purging of the files.') # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here if keys[0] == 1: cea.datamanagement.data_initializer.main(config) else: pass print('END: experiment {}. \n'.format(i))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def main():\n start = 1554994269 # unix timestamp, fixed for reproducability\n stop = start + 850 * 61 # number of acqs * time between acqs\n sampling_rate = 512. 
# Hz\n\n # Nyquist freq needs to be larger than frequency of J-peaks\n nyquist = sampling_rate / 2 + 1\n assert nyquist > 250\n\n # Test single mass for now\n mass = 2e-15\n result = run_sim(mass, start, stop, sampling_rate)\n\n sim_name = 'sim_mass_{:g}_rate_{:g}.npz'.format(mass, sampling_rate)\n np.savez(sim_name, times=result[0], amplitudes=result[1])\n print('saved: {}'.format(sim_name))", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def main(starttime, hstart, hstop, cfg):\n\n if cfg.target is tools.Target.ICONOEM or cfg.target is tools.Target.ICONART:\n\n logging.info('ICON chemistry data for IC/BC')\n\n # Wait for meteo to finish first\n tools.check_job_completion(cfg.log_finished_dir,\"meteo\")\n\n tools.create_dir(cfg.icon_input_oae, \"online emissions input\")\n tools.create_dir(cfg.icon_input_icbc, \"icon_input_icbc\")\n tools.create_dir(cfg.icon_input_icbc_processed, \"icon_input_icbc_processed\")\n\n starttime_real = starttime + timedelta(hours = hstart)\n\n #-----------------------------------------------------\n # Remap chemistry initial conditions\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"ic_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"ic_chem\")\n\n # Write remap_chem namelist\n in_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc+'_dry.nc')\n in_grid_filename = in_filename\n out_grid_filename = os.path.join(cfg.input_root_grid,cfg.dynamics_grid_filename)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap_chem'])) as input_file:\n to_write = input_file.read()\n output_nml = os.path.join(cfg.icon_work, 'icontools_remap_chem_ic.namelist')\n with open(output_nml, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_filename=in_filename,\n out_filename=out_filename,\n in_grid_filename=in_grid_filename,\n out_grid_filename=out_grid_filename)\n outf.write(to_write)\n\n # Write remapfields namelist\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_ic'])) as input_file:\n to_write = input_file.read()\n output_fields = 
os.path.join(cfg.icon_work, 'icontools_remapfields_chem_ic.namelist')\n with open(output_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n # Write run script (remap_ic.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_ic_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_ic.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 'remap_chem_ic.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped initial conditions with icontools\")\n\n os.remove(output_nml)\n os.remove(output_fields)\n os.remove(output_run)\n\n # Transform initial data from dry to wet mixing ratios\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=out_filename,output='temp_file_01.nc')\n cdo.selvar(\"LNSP\",input=out_filename,output='temp_file_03.nc')\n os.remove(out_filename)\n # Rename variable to match ICON internal name with CDO:\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc)\n cdo.chname(\"CH4w\",\"CH4\",input='temp_file_01.nc',output='temp_file_02.nc')\n cdo.merge(input='temp_file_02.nc temp_file_03.nc',output=out_filename)\n\n os.remove('temp_file_01.nc')\n os.remove('temp_file_02.nc')\n os.remove('temp_file_03.nc')\n \n\n\n #-----------------------------------------------------\n # Remap chem LBC\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"lbc_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"lbc_chem\")\n\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_lbc'])) as input_file:\n to_write = input_file.read()\n output_nml_fields = os.path.join(cfg.icon_work, 'icontools_remapfields_chem_lbc.namelist')\n with open(output_nml_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n # Write remap_lbc namelist\n in_grid_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n in_filename = os.path.join(cfg.input_root_chem,time.strftime(cfg.chem_nameformat)+'.grb')\n out_grid_filename = os.path.join(cfg.icon_input_grid,cfg.lateral_boundary_grid)\n out_filename = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap'])) as input_file:\n to_write = input_file.read()\n output_nml_lbc = os.path.join(cfg.icon_work, 'icontools_remap_chem_lbc.namelist')\n with open(output_nml_lbc, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_grid_filename=in_grid_filename,\n in_filename=in_filename,\n out_grid_filename=out_grid_filename,\n out_filename=out_filename)\n outf.write(to_write)\n\n # Write run script (remap_chem_lbc.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_lbc_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_lbc.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 
'remap_chem_lbc.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped boundary conditions at {} with icontools\".format(time))\n\n os.remove(output_nml_lbc)\n os.remove(output_run)\n\n os.remove(output_nml_fields)\n\n\n #-----------------------------------------------------\n # Merge chem files with meteo files using cdo\n #-----------------------------------------------------\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n chem_file = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n meteo_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n var_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_var.nc')\n transform_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_transform.nc')\n name_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_name.nc')\n processed_file = os.path.join(cfg.icon_input_icbc_processed, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n\n # Select variable with CDO\n cdo.selvar(\"CH4\",\"QV\",input=chem_file,output=var_file)\n # Transform to wet-mixing ratios with CDO\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=var_file,output=transform_file)\n # Rename variable to match ICON internal name with CDO:\n cdo.chname(\"CH4w\",\"oem_tracer_1\",input=transform_file,output=name_file)\n # Merge with CDO\n cdo.merge(input=name_file+' '+meteo_file,output=processed_file)\n\n # Delete temporary files\n os.remove(chem_file)\n os.remove(var_file)\n os.remove(transform_file)\n os.remove(name_file)\n\n logging.info(\"Merged chem variables to file {}\".format(meteo_file))\n\n\n\n # If COSMO (and not ICON):\n else:\n inv_to_process = []\n if cfg.target is tools.Target.COSMOGHG:\n try:\n CAMS = dict(fullname = \"CAMS\",\n nickname = \"cams\",\n executable = \"cams4int2cosmo\",\n indir = cfg.cams_dir_orig,\n outdir = cfg.cams_dir_proc,\n param = cfg.cams_parameters)\n inv_to_process.append(CAMS)\n except AttributeError:\n pass\n try:\n CT = dict(fullname = \"CarbonTracker\",\n nickname = \"ct\",\n executable = \"ctnoaa4int2cosmo\",\n indir = cfg.ct_dir_orig,\n outdir = cfg.ct_dir_proc,\n param = cfg.ct_parameters)\n inv_to_process.append(CT)\n except AttributeError:\n pass\n elif cfg.target is tools.Target.COSMOART:\n try:\n MOZART = dict(fullname = 'MOZART',\n nickname = 'mozart',\n executable = 'mozart2int2lm',\n indir = cfg.mozart_file_orig,\n outdir = cfg.mozart_dir_proc,\n param = [{'inc' : cfg.mozart_inc,\n 'suffix' : cfg.mozart_prefix}])\n inv_to_process.append(MOZART)\n except AttributeError:\n pass\n else:\n # Unknown target\n raise RuntimeError(\"Unknown target: {}\".format(cfg.target))\n\n # TO DO \n #MOZART = dict(fullname=\"MOZART\", nickname=\"mozart\",executable=\"cams4int2cosmo\")\n \n logging.info(\"Processing \" + \", \".join([i[\"fullname\"] for i in inv_to_process])+\" data\")\n\n scratch_path = os.path.join(cfg.int2lm_input,'icbc')\n tools.create_dir(scratch_path, \"icbc input\")\n\n for inv in inv_to_process:\n logging.info(inv[\"fullname\"]+\" files\")\n tools.create_dir(inv[\"outdir\"], \"processed \" + inv[\"fullname\"])\n #process_inv(starttime,hstart,hstop,increment,inv,cfg)\n \n for p in inv[\"param\"]:\n inc = p[\"inc\"]\n for time in tools.iter_hours(starttime, hstart, hstop, inc):\n logging.info(time)\n\n filename = 
os.path.join(inv[\"outdir\"],p[\"suffix\"]+\"_\"+time.strftime(\"%Y%m%d%H\")+\".nc\")\n if not os.path.exists(filename):\n logging.info(filename)\n try:\n to_call = getattr(tools, inv[\"executable\"])\n to_call.main(time,inv[\"indir\"],inv[\"outdir\"],p)\n except:\n logging.error(\"Preprocessing \"+inv[\"fullname\"] + \" data failed\")\n raise\n\n # copy to (temporary) run input directory\n tools.copy_file(filename, scratch_path)\n\n logging.info(\"OK\")", "def main_loop(csd_profile, csd_seed, total_ele, num_init_srcs=1000):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n\n #TrueCSD\n t_csd_x, t_csd_y, t_csd_z, true_csd = generate_csd_3D(csd_profile, csd_seed,\n start_x=0., end_x=1., \n start_y=0., end_y=1., \n start_z=0., end_z=1.,\n res_x=100, res_y=100,\n res_z=100)\n\n #Electrodes\n ele_lims = [0.15, 0.85] #square grid, xy min,max limits\n ele_res = int(np.ceil(total_ele**(3**-1))) #resolution of electrode grid\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, t_csd_y, t_csd_z)\n ele_x = ele_pos[:, 0]\n ele_y = ele_pos[:, 1]\n ele_z = ele_pos[:, 2]\n \n #kCSD estimation\n gdX = 0.05\n gdY = 0.05\n gdZ = 0.05\n x_lims = [.0,1.] #CSD estimation place\n y_lims = [.0,1.]\n z_lims = [.0,1.]\n params = {'h':50., \n 'gdX': gdX, 'gdY': gdY, 'gdZ': gdZ,\n 'xmin': x_lims[0], 'xmax': x_lims[1], \n 'ymin': y_lims[0], 'ymax': y_lims[1],\n 'zmin': y_lims[0], 'zmax': y_lims[1],\n 'ext': 0.0, 'n_srcs_init': num_init_srcs}\n tic = time.time() #time it\n k, est_csd = do_kcsd(ele_pos, pots, h=50., \n gdx=gdX, gdy= gdY, gdz=gdZ,\n xmin=x_lims[0], xmax=x_lims[1], \n ymin=y_lims[0], ymax=y_lims[1],\n zmin=z_lims[0], zmax=z_lims[1],\n n_src_init=num_init_srcs, src_type='step')\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, chr_y, chr_z, test_csd = generate_csd_3D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1],\n start_y=y_lims[0], end_y=y_lims[1],\n start_z=z_lims[0], end_z=z_lims[1],\n res_x=int((x_lims[1]-x_lims[0])/gdX), \n res_y=int((y_lims[1]-y_lims[0])/gdY),\n res_z=int((z_lims[1]-z_lims[0])/gdZ))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,:,:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title = str(k.lambd)+','+str(k.R)+', '+str(k.cv_error)+', '+str(rms)+', '+str(toc)\n save_as = csd_name+'_'+str(csd_seed)+'of'+str(total_ele)\n #save_as = csd_name+'_'+str(num_init_srcs)+'_'+str(total_ele)\n make_plots(title, \n chr_x, chr_y, chr_z, test_csd,\n ele_x, ele_y, ele_z, pots,\n k.estm_x, k.estm_y, k.estm_z, est_csd) \n #save\n result_kcsd = [k.lambd, k.R, k.cv_error, rms, toc]\n return est_csd, result_kcsd", "def run_process(hrc):\n#\n#--- set conditions for either hrc-i or hrc s\n#\n if hrc == 'hrc_i':\n out_list = 'hrc_i_list'\n data_dir = '/data/hrc/i/'\n inst = 'i'\n else:\n out_list = 'hrc_s_list'\n data_dir = '/data/hrc/s/'\n inst = 's'\n#\n#--- make a list of obsids\n#\n cmd = 'ls -d ' + data_dir + '* > ' + zspace\n os.system(cmd)\n data = mcf.read_data_file(zspace, remove=1)\n hlist = []\n for ent in data:\n atemp = re.split('\\/', ent)\n obsid = atemp[-1]\n if mcf.is_neumeric(obsid):\n hlist.append(obsid)\n\n# if hrc == 'hrc_i':\n# print(\"HRC I : \" + str(hlist))\n# else:\n# print(\"HRC S : \" + str(hlist))\n# \n for obsid in hlist:\n obsid = str(int(float(obsid)))\n\n with open(out_list, 'w') as fo:\n fo.write(str(obsid) + '\\n')\n cmd = 'rm -rf ' + data_dir + obsid + \"analysis/*\"\n os.system(cmd)\n#\n#--- extract fits data needed for 
analysis\n#\n chk = extract_hrc_data(obsid, data_dir)\n if chk == False:\n print(\"Not all data are available\")\n continue\n\n if hrc == 'hrc_i':\n cmd = 'csh -f ' + bin_dir + 'repro_all_new.csh hrc_i_list'\n else:\n cmd = 'csh -f ' + bin_dir + 'repro_all_S_new.csh hrc_s_list'\n\n try:\n run_ciao(cmd)\n cdir = data_dir + '/' + str(obsid)\n if os.path.isdir(cdir):\n cmd = 'chgrp -R hat ' + cdir \n os.system(cmd)\n cmd = 'chmod -R 775 ' + cdir \n os.system(cmd)\n#\n#--- directory name should be 5 digit\n#\n test = int(float(obsid))\n if test < 10000:\n chk = mcf.add_leading_zero(obsid, 5)\n odir = data_dir + '/' + str(chk)\n if os.path.isdir(odir):\n cmd = 'rm -rf ' + odir\n os.system(cmd)\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n else:\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n except:\n pass\n\n mcf.rm_files(out_list)\n correct_naming(obsid, inst)\n\n #chk_proccess_status(inst, hlist)", "def generate_megafile():\n\n print(\"\\nFetching testing dataset…\")\n testing = get_testing()\n\n print(\"\\nFetching ECDC dataset…\")\n ecdc = get_ecdc()\n\n location_mismatch = set(testing.location).difference(set(ecdc.location))\n for loc in location_mismatch:\n print(f\"<!> Location '{loc}' has testing data but is absent from ECDC data\")\n\n print(\"\\nFetching OxCGRT dataset…\")\n cgrt = get_cgrt()\n\n all_covid = (\n ecdc\n .merge(testing, on=[\"date\", \"location\"], how=\"outer\")\n .merge(cgrt, on=[\"date\", \"location\"], how=\"left\")\n .sort_values([\"location\", \"date\"])\n )\n\n # Add ISO codes\n print(\"Adding ISO codes…\")\n iso_codes = pd.read_csv(os.path.join(INPUT_DIR, \"iso/iso3166_1_alpha_3_codes.csv\"))\n\n missing_iso = set(all_covid.location).difference(set(iso_codes.location))\n if len(missing_iso) > 0:\n print(missing_iso)\n raise Exception(\"Missing ISO code for some locations\")\n\n all_covid = iso_codes.merge(all_covid, on=\"location\")\n\n # Add continents\n print(\"Adding continents…\")\n continents = pd.read_csv(\n os.path.join(INPUT_DIR, \"owid/continents.csv\"),\n names=[\"_1\", \"iso_code\", \"_2\", \"continent\"],\n usecols=[\"iso_code\", \"continent\"],\n header=0\n )\n\n all_covid = continents.merge(all_covid, on=\"iso_code\", how=\"right\")\n\n # Add macro variables\n # - the key is the name of the variable of interest\n # - the value is the path to the corresponding file\n macro_variables = {\n \"population\": \"un/population_2020.csv\",\n \"population_density\": \"wb/population_density.csv\",\n \"median_age\": \"un/median_age.csv\",\n \"aged_65_older\": \"wb/aged_65_older.csv\",\n \"aged_70_older\": \"un/aged_70_older.csv\",\n \"gdp_per_capita\": \"wb/gdp_per_capita.csv\",\n \"extreme_poverty\": \"wb/extreme_poverty.csv\",\n \"cardiovasc_death_rate\": \"gbd/cardiovasc_death_rate.csv\",\n \"diabetes_prevalence\": \"wb/diabetes_prevalence.csv\",\n \"female_smokers\": \"wb/female_smokers.csv\",\n \"male_smokers\": \"wb/male_smokers.csv\",\n \"handwashing_facilities\": \"un/handwashing_facilities.csv\",\n \"hospital_beds_per_thousand\": \"owid/hospital_beds.csv\",\n \"life_expectancy\": \"owid/life_expectancy.csv\",\n \"human_development_index\": \"un/human_development_index.csv\",\n }\n all_covid = add_macro_variables(all_covid, macro_variables)\n\n print(\"Writing to CSV…\")\n all_covid.to_csv(os.path.join(DATA_DIR, \"owid-covid-data.csv\"), index=False)\n\n print(\"Writing to XLSX…\")\n all_covid.to_excel(os.path.join(DATA_DIR, \"owid-covid-data.xlsx\"), index=False)\n\n print(\"Writing to JSON…\")\n df_to_json(all_covid, 
os.path.join(DATA_DIR, \"owid-covid-data.json\"), macro_variables.keys())\n\n # Store the last updated time\n timestamp_filename = os.path.join(DATA_DIR, \"owid-covid-data-last-updated-timestamp.txt\")\n with open(timestamp_filename, \"w\") as timestamp_file:\n timestamp_file.write(datetime.utcnow().replace(microsecond=0).isoformat())\n\n print(\"All done!\")", "def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)", "def main(clean_dir, rsfc_dir, atlas_dir, subject, sessions, space, desc_list, n_jobs):\n os.system(f\"export OMP_NUM_THREADS={n_jobs}\")\n assert len(desc_list) == 2\n atlases = sorted(glob(op.join(atlas_dir, \"*\")))\n\n if sessions[0] is None:\n temp_ses = glob(op.join(clean_dir, subject, \"ses-*\"))\n if len(temp_ses) > 0:\n sessions = [op.basename(x) for x in temp_ses]\n\n for session in sessions:\n if session is not None:\n clean_subj_dir = op.join(clean_dir, subject, session, \"func\")\n rsfc_subj_dir = op.join(rsfc_dir, subject, session, \"func\")\n else:\n clean_subj_dir = op.join(clean_dir, subject, \"func\")\n rsfc_subj_dir = op.join(rsfc_dir, subject, \"func\")\n\n # Collect important files\n clean_subj_files = sorted(\n glob(\n op.join(\n clean_subj_dir, f\"*task-rest*_space-{space}*_desc-{desc_list[0]}_bold.nii.gz\"\n )\n )\n )\n\n if len(clean_subj_files) > 0:\n os.makedirs(rsfc_subj_dir, exist_ok=True)\n\n # ###################\n # RSFC\n # ###################\n for clean_subj_file in clean_subj_files:\n clean_subj_name = op.basename(clean_subj_file)\n prefix = clean_subj_name.split(\"desc-\")[0].rstrip(\"_\")\n\n mask_files = sorted(glob(op.join(clean_subj_dir, f\"{prefix}_desc-brain_mask.nii.gz\")))\n assert len(mask_files) == 1\n\n mask_name = os.path.basename(mask_files[0])\n 
mask_file = op.join(rsfc_subj_dir, mask_name)\n copyfile(mask_files[0], mask_file)\n\n print(f\"\\tProcessing {subject}, {session} files:\", flush=True)\n print(f\"\\t\\tClean: {clean_subj_file}\", flush=True)\n print(f\"\\t\\tMask: {mask_file}\", flush=True)\n\n for atlas in atlases:\n atlas_name = op.basename(atlas)\n atlas_imgs = sorted(glob(op.join(atlas, \"*.nii.gz\")))\n assert len(atlas_imgs) == 1\n atlas_img = atlas_imgs[0]\n\n lab_files = sorted(glob(op.join(atlas, \"*.txt\")))\n if len(lab_files) == 0:\n # Do not create label table file\n make_table = False\n else:\n assert len(lab_files) == 1\n lab_file = lab_files[0]\n make_table = True\n\n # Resample atlas\n atlas_img_res = op.join(rsfc_subj_dir, f\"{prefix}_desc-{atlas_name}_atlas.nii.gz\")\n if not op.exists(atlas_img_res):\n roi_resample(atlas_img, atlas_img_res, clean_subj_file)\n \n # Create label table\n lab_table = op.join(rsfc_subj_dir, f\"{prefix}_desc-{atlas_name}_labtable.niml.lt\")\n if (not op.exists(lab_table)) and (make_table):\n make_label_table(lab_file, lab_table, atlas_img_res)\n\n # Calculate RSFC\n rsfc_atlas_subj = op.join(rsfc_subj_dir, f\"{prefix}_desc-{atlas_name}\")\n if not op.exists(f\"{rsfc_atlas_subj}_000.netcc\"):\n roi2roi_conn(clean_subj_file, mask_file, atlas_img_res, rsfc_atlas_subj)", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def main(runID=00, store_export='datafile', evalperday=1):\n tnow = 0\n tstart = tm.time()\n\n # get lists for keeping count of cells.\n free_naives, free_memory, GC_waiting = new_lists()\n\n # get random number objects for uniform 0-1, ints for GCs\n RNs = Rands()\n RIs = RandInts()\n\n # get the premade pool of Ab sequences that bind a chosen Ag with the given\n # distribution of binding energies. 
An Ag of appropriate length is made\n # directly within the sequence repertoire function\n seq_list, E_list, AgEpitope = make_shaped_repertoire(RNs)\n\n # for the required number of naive cells in the system, make Abs and append\n # to free_naive list, same for unspecific memory cells\n for n in xrange(cf.naive_pool):\n newcell = make_naive(RNs, seq_list, AgEpitope, tnow)\n free_naives.append(newcell)\n\n for n in xrange(cf.memory_pool):\n newcell = make_memory(RNs, seq_list, AgEpitope, tnow)\n free_memory.append(newcell)\n\n # get Ag level over time\n Agcurve = Ag_density()\n\n # get available LFs over time\n LFcurve = LF_presence()\n\n # open event list, event structure: (execution time, type, GC, cell list)\n event_list = []\n\n # bookkeeping - general\n l_fm = [] # free memory\n mut_list = [] # for collecting all mutations and their effects\n\n if (store_export == 'datafile' or store_export == 'dictionary'):\n l_fn = [] # free naives\n l_GCs = [[] for i in range(cf.nGCs)] # cells in each GC\n ms_times = [[] for gc in range(cf.nGCs)] # times of memory prod./GC\n ms_vals = [[] for gc in range(cf.nGCs)] # quality of memory prod./GC\n ms_fams = [[] for gc in range(cf.nGCs)] # family of memory prod./GC\n ms_muts = [[] for gc in range(cf.nGCs)] # mutations of memory prod./GC\n # external or internal data storage\n if store_export == 'datafile':\n filepath = 'raw_data/store{}.h5'.format(runID)\n store = pd.HDFStore(filepath)\n elif store_export == 'dictionary':\n store = {}\n # bookkeeping - minimal\n l_aff = [] # mean affinities\n s_aff = [] # std of affinities\n l_mut = [] # mean mutation counts\n s_mut = [] # std of mutation counts\n l_ents = [] # family entropies\n\n # timepoints at which to store the state of the simulation\n evalfac = int(12/evalperday)\n evaltimes = np.array(range(int(cf.endtime / evalfac))) * evalfac\n\n # start looping over all events at every timestep\n while tnow <= cf.endtime:\n if (store_export == 'datafile' or store_export == 'dictionary'):\n l_fm.append(len(free_memory))\n l_fn.append(len(free_naives))\n for i in range(len(l_GCs)):\n GCcount = len(GC_waiting[i])\n for event in event_list:\n if (event[1] == 'Differentiate' or event[1] == 'Divide') \\\n and event[2] == i:\n GCcount += len(event[3])\n l_GCs[i].append(GCcount)\n\n # remove cells which have died from the naive_pool\n free_naives = old_cells_die(free_naives, tnow)\n # remove cells which have died from the waiting_room\n GC_waiting = long_waiters_die(GC_waiting, tnow)\n\n # refill the naive_pool if it has fallen below standard size\n # taking care that it is not refilled instantaneously but at a speed\n # of the order of natural turnover (naive_pool/tlifeN)\n maxrefill = np.ceil(cf.naive_pool/cf.tlifeN)\n navcount = 0\n while len(free_naives) < cf.naive_pool and navcount < maxrefill:\n newcell = make_naive(RNs, seq_list, AgEpitope, tnow)\n free_naives.append(newcell)\n navcount += 1\n\n # execute list specific events if present at this timepoint\n if len(event_list) > 0:\n # check which events happen at this timepoint\n now_list = [event for event in event_list if event[0] == tnow]\n event_list = [event for event in event_list if event[0] != tnow]\n\n # execute events happening now\n for event in now_list:\n if event[1] == 'Enter':\n GC_waiting = cells_enter_GCs(GC_waiting, event[3], tnow,\n RIs)\n elif event[1] == 'Divide':\n GC_waiting[event[2]], mut_list = cell_division(\n GC_waiting[event[2]], event[3], AgEpitope, tnow,\n mut_list, RNs)\n elif event[1] == 'Differentiate':\n free_memory = 
free_memory + event[3]\n if (store_export == 'datafile' or\n store_export == 'dictionary'):\n for cell in event[3]:\n ms_times[event[2]].append(tnow)\n ms_vals[event[2]].append(cell.affinity)\n ms_fams[event[2]].append(cell.family)\n ms_muts[event[2]].append(cell.mutations)\n\n # activate free naive and memory cells if Ag is present in the system\n if Agcurve[tnow] > 0:\n free_naives, free_memory, event, actsum = try_activation(\n Agcurve[tnow], free_naives, free_memory, tnow, RNs)\n if event is not None:\n event_list.append(event)\n\n # select waiting cells for help signals if LFs are present\n if LFcurve[tnow] > 0:\n # perform selection for every GC separately,\n for i in range(len(GC_waiting)):\n if len(GC_waiting[i]) >= 0:\n GC_waiting[i], new_events, mut_list = select_best_waiters(\n LFcurve[tnow], GC_waiting[i], i, tnow, AgEpitope,\n mut_list, RNs)\n event_list = event_list + new_events\n\n # evaluate everything and store results if tnow in evaltimes\n if tnow in evaltimes:\n if (store_export == 'datafile' or store_export == 'dictionary'):\n meminfo = []\n for cell in free_memory:\n meminfo.append((cell.ID, cell.family, cell.sequence,\n cell.affinity, cell.affinity0,\n cell.birthtime,\n cell.mutations, cell.origin))\n memDF = pd.DataFrame(meminfo, columns=['ID', 'family',\n 'sequence', 'affinity',\n 'affinity0',\n 'birthtime',\n 'mutations', 'origin'])\n store['free_{}'.format(tnow)] = memDF\n\n for i in range(cf.nGCs):\n GCinfo = []\n for cell in GC_waiting[i]:\n GCinfo.append((cell.ID, cell.family, cell.sequence,\n cell.affinity, cell.affinity0,\n cell.birthtime, cell.mutations))\n for event in event_list:\n if (event[1] == 'Differentiate' or\n event[1] == 'Divide') and event[2] == i:\n for cell in event[3]:\n GCinfo.append((cell.ID, cell.family,\n cell.sequence,\n cell.affinity, cell.affinity0,\n cell.birthtime, cell.mutations))\n GCDF = pd.DataFrame(GCinfo, columns=['ID', 'family',\n 'sequence',\n 'affinity',\n 'affinity0',\n 'birthtime',\n 'mutations'])\n store['GC{0}_{1}'.format(i, tnow)] = GCDF\n elif store_export == 'minimal':\n l_fm.append(len(free_memory))\n afflist = [cell.affinity for cell in free_memory]\n mutatlist = [cell.mutations for cell in free_memory]\n familist = [cell.family for cell in free_memory]\n l_aff.append(np.nanmean(afflist))\n s_aff.append(np.nanstd(afflist))\n l_mut.append(np.nanmean(mutatlist))\n s_mut.append(np.nanstd(mutatlist))\n\n CC = Counter(familist)\n l_ents.append(scipy.stats.entropy(CC.values(), base=2))\n\n # increment time\n tnow += 1\n\n tend = tm.time()\n print('pure simulation time = {} s'.format(tend - tstart))\n\n if (store_export == 'datafile' or store_export == 'dictionary'):\n # put all remaining information into storage\n store['l_times'] = pd.DataFrame(np.arange(cf.endtime+1)/float(12))\n store['l_fn'] = pd.DataFrame(l_fn)\n store['l_fm'] = pd.DataFrame(l_fm)\n for i in range(len(l_GCs)):\n store['l_GCs_{}'.format(i)] = pd.DataFrame(l_GCs[i])\n store['LFcurve'] = pd.DataFrame(LFcurve)\n store['Agcurve'] = pd.DataFrame(Agcurve)\n store['mut_list'] = pd.DataFrame(mut_list)\n store['ms_fams'] = pd.DataFrame(ms_fams)\n store['ms_vals'] = pd.DataFrame(ms_vals)\n store['ms_times'] = pd.DataFrame(ms_times)\n store['ms_muts'] = pd.DataFrame(ms_muts)\n store['times'] = pd.DataFrame(evaltimes)\n store['nGCs'] = pd.DataFrame([cf.nGCs])\n store['E_list'] = pd.DataFrame(E_list)\n\n if store_export == 'datafile':\n store.close()\n return filepath\n elif store_export == 'dictionary':\n return store\n\n elif store_export == 'minimal':\n return 
evaltimes, l_fm, l_aff, s_aff, l_mut, s_mut, l_ents", "def main():\n\n test = argv[1]\n if test == 'test':\n dir_data = 'tests/test_data/'\n elif test == 'real':\n dir_data = 'real_data/'\n\n naf_CGI = 18\n neu_CGI = 18\n nas_CGI = 8\n nJ = 28\n nM = 28\n nA = 76\n\n print 'naf_CGI ' + str(naf_CGI)\n print 'neu_CGI ' + str(neu_CGI)\n print 'nas_CGI ' + str(nas_CGI)\n print 'nA ' + str(nA)\n print 'nJ ' + str(nJ)\n print 'nM ' + str(nM)\n\n CGI_file = str(dir_data)+'YRI9.CEU9.CHB4.chr1.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes_snpsonly_maf0.005'\n CGIarray_file = str(dir_data)+'YRI9.CEU9.CHB4.chr1.atDNA.biAllelicSNPnoDI.genotypes_hg18_Behar_HGDP_FtDNA'\n array_file = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_chr1_subset_21509'\n print CGI_file\n print CGIarray_file\n print array_file\n\n seq_real_CGI_file = AllelesReal(str(CGI_file)+'.tped')\n seqAF_CGI_bits = seq_real_CGI_file.make_bitarray_seq(0, naf_CGI)\n seqEu_CGI_bits = seq_real_CGI_file.make_bitarray_seq(naf_CGI, naf_CGI + neu_CGI)\n seqAs_CGI_bits = seq_real_CGI_file.make_bitarray_seq(naf_CGI + neu_CGI, naf_CGI + neu_CGI + nas_CGI)\n\n seq_real_CGIarray_file = AllelesReal(str(CGIarray_file)+'.tped')\n seqAf_asc_bits = seq_real_CGIarray_file.make_bitarray_seq(0, naf_CGI)\n seqEu_asc_bits = seq_real_CGIarray_file.make_bitarray_seq(naf_CGI, naf_CGI + neu_CGI)\n seqAs_asc_bits = seq_real_CGIarray_file.make_bitarray_seq(naf_CGI + neu_CGI, naf_CGI + neu_CGI + nas_CGI)\n\n seq_real_array_file = AllelesReal(str(array_file)+'.tped')\n seqJ_asc_bits = seq_real_array_file.make_bitarray_seq(0, nJ)\n seqM_asc_bits = seq_real_array_file.make_bitarray_seq(nJ, nJ + nM)\n seqA_asc_bits = seq_real_array_file.make_bitarray_seq(nJ + nM, nJ + nM + nA)\n\n res = []\n\n Af_res = []\n Af_res.extend(afs_stats_bitarray.base_S_ss(seqAF_CGI_bits, naf_CGI))\n pi_AfCGI = afs_stats_bitarray.Pi2(Af_res[3], naf_CGI)\n Af_res.append(afs_stats_bitarray.Tajimas(pi_AfCGI, Af_res[0], naf_CGI))\n del (Af_res[3])\n res.extend(Af_res)\n head = 'SegS_Af_CGI\\tSing_Af_CGI\\tDupl_Af_CGI\\tTajD_Af_CGI\\t'\n\n Eu_res = []\n Eu_res.extend(afs_stats_bitarray.base_S_ss(seqEu_CGI_bits, neu_CGI))\n pi_EuCGI = afs_stats_bitarray.Pi2(Eu_res[3], neu_CGI)\n Eu_res.append(afs_stats_bitarray.Tajimas(pi_EuCGI, Eu_res[0], neu_CGI))\n del (Eu_res[3])\n res.extend(Eu_res)\n head = head + 'SegS_Eu_CGI\\tSing_Eu_CGI\\tDupl_Eu_CGI\\tTajD_Eu_CGI\\t'\n\n As_res = []\n As_res.extend(afs_stats_bitarray.base_S_ss(seqAs_CGI_bits, nas_CGI))\n pi_AsCGI = afs_stats_bitarray.Pi2(As_res[3], nas_CGI)\n As_res.append(afs_stats_bitarray.Tajimas(pi_AsCGI, As_res[0], nas_CGI))\n del (As_res[3])\n res.extend(As_res)\n head = head + 'SegS_As_CGI\\tSing_As_CGI\\tDupl_As_CGI\\tTajD_As_CGI\\t'\n\n ##fst between populations\n res.append(afs_stats_bitarray.FST2(seqAF_CGI_bits, pi_AfCGI, naf_CGI, seqEu_CGI_bits, pi_EuCGI, neu_CGI))\n res.append(afs_stats_bitarray.FST2(seqAF_CGI_bits, pi_AfCGI, naf_CGI, seqAs_CGI_bits, pi_AsCGI, nas_CGI))\n res.append(afs_stats_bitarray.FST2(seqEu_CGI_bits, pi_EuCGI, neu_CGI, seqAs_CGI_bits, pi_AsCGI, nas_CGI))\n head = head + 'FST_AfEu_CGI\\tFST_AfAs_CGI\\tFST_EuAs_CGI\\t'\n\n ########Use Germline to find IBD on pseduo array ped and map files\n run_germline = int(argv[2])\n filenameped = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_YRI9.CEU9.CHB4.chr1.ped'\n filenamemap = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_YRI9.CEU9.CHB4.chr1.map'\n filenameout = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_YRI9.CEU9.CHB4.chr1'\n\n print 'run germline? 
'+str(run_germline)\n if (run_germline == 0):\n print 'Running Germline on ' + str(filenameped) + ' ' + str(filenamemap)\n print 'p ' + str(filenameped) + ' ' + str(filenamemap) + ' ' + str(filenameout) + ' \"-bits 10\"'\n germline = Popen.wait(Popen('bash ./bin/phasing_pipeline/gline.sh ./bin/germline-1-5-1/germline ' + str(filenameped) + ' ' + str(filenamemap) + ' ' + str(filenameout) + ' \"-bits 10\"', shell=True))\n\n print 'finished running germline'\n\n ########Get IBD stats from Germline output\n if os.path.isfile(str(filenameout) + '.match'):\n print 'reading Germline IBD output'\n filegermline = open(str(filenameout) + '.match', 'r')\n IBDlengths_AA = []\n IBDlengths_JJ = []\n IBDlengths_MM = []\n IBDlengths_EE = []\n IBDlengths_AE = []\n IBDlengths_AJ = []\n IBDlengths_AM = []\n IBDlengths_JM = []\n IBDlengths_JE = []\n IBDlengths_ME = []\n for line in filegermline:\n pop1 = line.split()[0]\n pop2 = line.split()[2]\n segment = float(line.split()[10])\n pair = str(pop1) + '_' + str(pop2)\n if pair == 'EA_EA' or pair == 'WA_WA' or pair == 'EA_WA' or pair == 'WA_EA':\n IBDlengths_AA.append(segment)\n if pair == 'J_J':\n IBDlengths_JJ.append(segment)\n if pair == 'M_M':\n IBDlengths_MM.append(segment)\n if pair == 'E_E':\n IBDlengths_EE.append(segment)\n if pair == 'EA_E' or pair == 'E_EA' or pair == 'WA_E' or pair == 'E_WA':\n IBDlengths_AE.append(segment)\n if pair == 'EA_J' or pair == 'J_EA' or pair == 'WA_J' or pair == 'J_WA':\n IBDlengths_AJ.append(segment)\n if pair == 'EA_M' or pair == 'M_EA' or pair == 'WA_M' or pair == 'M_WA':\n IBDlengths_AM.append(segment)\n if pair == 'J_M' or pair == 'M_J':\n IBDlengths_JM.append(segment)\n if pair == 'J_E' or pair == 'E_J':\n IBDlengths_JE.append(segment)\n if pair == 'M_E' or pair == 'E_M':\n IBDlengths_ME.append(segment)\n filegermline.close()\n\n print 'calculating summary stats'\n\n IBDlengths_mean = []\n IBDlengths_median = []\n IBDlengths_num = []\n IBDlengths_var = []\n IBDlengths_mean30 = []\n IBDlengths_median30 = []\n IBDlengths_num30 = []\n IBDlengths_var30 = []\n\n pairs = [IBDlengths_AA, IBDlengths_JJ, IBDlengths_MM, IBDlengths_EE, IBDlengths_AE, IBDlengths_AJ,\n IBDlengths_AM, IBDlengths_JM, IBDlengths_JE, IBDlengths_ME]\n for p in pairs:\n IBDlengths_num.append(len(p))\n if len(p) < 1:\n p.append(0)\n IBDlengths_mean.append(np.mean(p))\n IBDlengths_median.append(np.median(p))\n IBDlengths_var.append(np.var(p))\n #### Get IBD greater than 30 Mb\n IBDlengths30 = []\n for l in p:\n if l > 30:\n IBDlengths30.append(l)\n IBDlengths_num30.append(len(IBDlengths30))\n if len(IBDlengths30) == 0:\n IBDlengths30.append(0)\n IBDlengths_mean30.append(np.mean(IBDlengths30))\n IBDlengths_median30.append(np.median(IBDlengths30))\n IBDlengths_var30.append(np.var(IBDlengths30))\n\n\n res.extend(IBDlengths_mean)\n head = head + 'IBD_mean_AA\\tIBD_mean_JJ\\tIBD_mean_MM\\tIBD_mean_EE\\tIBD_mean_AE\\tIBD_mean_AJ\\tIBD_mean_AM\\tIBD_mean_JM\\tIBD_mean_JE\\tIBD_mean_ME\\t'\n res.extend(IBDlengths_median)\n head = head + 'IBD_median_AA\\tIBD_median_JJ\\tIBD_median_MM\\tIBD_median_EE\\tIBD_median_AE\\tIBD_median_AJ\\tIBD_median_AM\\tIBD_median_JM\\tIBD_median_JE\\tIBD_median_ME\\t'\n res.extend(IBDlengths_num)\n head = head + 'IBD_num_AA\\tIBD_num_JJ\\tIBD_num_MM\\tIBD_num_EE\\tIBD_num_AE\\tIBD_num_AJ\\tIBD_num_AM\\tIBD_num_JM\\tIBD_num_JE\\tIBD_num_ME\\t'\n res.extend(IBDlengths_var)\n head = head + 'IBD_var_AA\\tIBD_var_JJ\\tIBD_var_MM\\tIBD_var_EE\\tIBD_var_AE\\tIBD_var_AJ\\tIBD_var_AM\\tIBD_var_JM\\tIBD_var_JE\\tIBD_var_ME\\t'\n\n 
res.extend(IBDlengths_mean30)\n head = head + 'IBD30_mean_AA\\tIBD30_mean_JJ\\tIBD30_mean_MM\\tIBD30_mean_EE\\tIBD30_mean_AE\\tIBD30_mean_AJ\\tIBD30_mean_AM\\tIBD30_mean_JM\\tIBD30_mean_JE\\tIBD30_mean_ME\\t'\n res.extend(IBDlengths_median30)\n head = head + 'IBD30_median_AA\\tIBD30_median_JJ\\tIBD30_median_MM\\tIBD30_median_EE\\tIBD30_median_AE\\tIBD30_median_AJ\\tIBD30_median_AM\\tIBD30_median_JM\\tIBD30_median_JE\\tIBD30_median_ME\\t'\n res.extend(IBDlengths_num30)\n head = head + 'IBD30_num_AA\\tIBD30_num_JJ\\tIBD30_num_MM\\tIBD30_num_EE\\tIBD30_num_AE\\tIBD30_num_AJ\\tIBD30_num_AM\\tIBD30_num_JM\\tIBD30_num_JE\\tIBD30_num_ME\\t'\n res.extend(IBDlengths_var30)\n head = head + 'IBD30_var_AA\\tIBD30_var_JJ\\tIBD30_var_MM\\tIBD30_var_EE\\tIBD30_var_AE\\tIBD30_var_AJ\\tIBD30_var_AM\\tIBD30_var_JM\\tIBD30_var_JE\\tIBD30_var_ME\\t'\n\n\n Af_asc = []\n ss_Af_asc = afs_stats_bitarray.base_S_ss(seqAf_asc_bits, naf_CGI)\n if (ss_Af_asc[0] == 0):\n for i in xrange(5):\n Af_asc.append(0)\n pi_Af_asc = 0\n else:\n Af_asc.extend(afs_stats_bitarray.base_S_ss(seqAf_asc_bits, naf_CGI))\n pi_Af_asc = afs_stats_bitarray.Pi2(Af_asc[3], naf_CGI)\n Af_asc.append(pi_Af_asc)\n Af_asc.append(afs_stats_bitarray.Tajimas(pi_Af_asc, Af_asc[0], naf_CGI))\n del (Af_asc[3])\n res.extend(Af_asc)\n head = head + 'SegS_Af_ASC\\tSing_Af_ASC\\tDupl_Af_ASC\\tPi_Af_ASC\\tTajD_Af_ASC\\t'\n\n Eu_asc = []\n ss_Eu_asc = afs_stats_bitarray.base_S_ss(seqEu_asc_bits, neu_CGI)\n if (ss_Eu_asc[0] == 0):\n for i in xrange(5):\n Eu_asc.append(0)\n pi_Eu_asc = 0\n else:\n Eu_asc.extend(afs_stats_bitarray.base_S_ss(seqEu_asc_bits, neu_CGI))\n pi_Eu_asc = afs_stats_bitarray.Pi2(Eu_asc[3], neu_CGI)\n Eu_asc.append(pi_Eu_asc)\n Eu_asc.append(afs_stats_bitarray.Tajimas(pi_Eu_asc, Eu_asc[0], neu_CGI))\n del (Eu_asc[3])\n res.extend(Eu_asc)\n head = head + 'SegS_Eu_ASC\\tSing_Eu_ASC\\tDupl_Eu_ASC\\tPi_Eu_ASC\\tTajD_Eu_ASC\\t'\n\n As_asc = []\n ss_As_asc = afs_stats_bitarray.base_S_ss(seqAs_asc_bits, nas_CGI)\n if (ss_As_asc[0] == 0):\n for i in xrange(5):\n As_asc.append(0)\n pi_As_asc = 0\n else:\n As_asc.extend(afs_stats_bitarray.base_S_ss(seqAs_asc_bits, nas_CGI))\n pi_As_asc = afs_stats_bitarray.Pi2(As_asc[3], nas_CGI)\n As_asc.append(pi_As_asc)\n As_asc.append(afs_stats_bitarray.Tajimas(pi_As_asc, As_asc[0], nas_CGI))\n del (As_asc[3])\n res.extend(As_asc)\n head = head + 'SegS_As_ASC\\tSing_As_ASC\\tDupl_As_ASC\\tPi_As_ASC\\tTajD_As_ASC\\t'\n\n J_asc = []\n ss_J_asc = afs_stats_bitarray.base_S_ss(seqJ_asc_bits, nJ)\n if (ss_J_asc[0] == 0):\n for i in xrange(5):\n J_asc.append(0)\n pi_J_asc = 0\n else:\n J_asc.extend(afs_stats_bitarray.base_S_ss(seqJ_asc_bits, nJ))\n pi_J_asc = afs_stats_bitarray.Pi2(J_asc[3], nJ)\n J_asc.append(pi_J_asc)\n J_asc.append(afs_stats_bitarray.Tajimas(pi_J_asc, J_asc[0], nJ))\n del (J_asc[3])\n res.extend(J_asc)\n head = head + 'SegS_J_ASC\\tSing_J_ASC\\tDupl_J_ASC\\tPi_J_ASC\\tTajD_J_ASC\\t'\n\n M_asc = []\n ss_M_asc = afs_stats_bitarray.base_S_ss(seqM_asc_bits, nM)\n if (ss_M_asc[0] == 0):\n for i in xrange(5):\n M_asc.append(0)\n pi_M_asc = 0\n else:\n M_asc.extend(afs_stats_bitarray.base_S_ss(seqM_asc_bits, nM))\n pi_M_asc = afs_stats_bitarray.Pi2(M_asc[3], nM)\n M_asc.append(pi_M_asc)\n M_asc.append(afs_stats_bitarray.Tajimas(pi_M_asc, M_asc[0], nM))\n del (M_asc[3])\n res.extend(M_asc)\n head = head + 'SegS_M_ASC\\tSing_M_ASC\\tDupl_M_ASC\\tPi_M_ASC\\tTajD_M_ASC\\t'\n\n A_asc = []\n ss_A_asc = afs_stats_bitarray.base_S_ss(seqA_asc_bits, nA)\n if (ss_A_asc[0] == 0):\n for i in xrange(5):\n 
A_asc.append(0)\n pi_A_asc = 0\n else:\n A_asc.extend(afs_stats_bitarray.base_S_ss(seqA_asc_bits, nA))\n pi_A_asc = afs_stats_bitarray.Pi2(A_asc[3], nA)\n A_asc.append(pi_A_asc)\n A_asc.append(afs_stats_bitarray.Tajimas(pi_A_asc, A_asc[0], nA))\n del (A_asc[3])\n res.extend(A_asc)\n head = head + 'SegS_A_ASC\\tSing_A_ASC\\tDupl_A_ASC\\tPi_A_ASC\\tTajD_A_ASC\\t'\n\n res.append(afs_stats_bitarray.FST2(seqAf_asc_bits, pi_Af_asc, naf_CGI, seqEu_asc_bits, pi_Eu_asc, neu_CGI))\n res.append(afs_stats_bitarray.FST2(seqAf_asc_bits, pi_Af_asc, naf_CGI, seqAs_asc_bits, pi_As_asc, nas_CGI))\n res.append(afs_stats_bitarray.FST2(seqEu_asc_bits, pi_Eu_asc, neu_CGI, seqAs_asc_bits, pi_As_asc, nas_CGI))\n head = head + 'FST_AfEu_ASC\\tFST_AfAs_ASC_m\\tFST_EuAs_ASC\\t'\n\n res.append(afs_stats_bitarray.FST2(seqA_asc_bits, pi_A_asc, nA, seqEu_asc_bits, pi_Eu_asc, neu_CGI))\n res.append(afs_stats_bitarray.FST2(seqA_asc_bits, pi_A_asc, nA, seqJ_asc_bits, pi_J_asc, nJ))\n res.append(afs_stats_bitarray.FST2(seqA_asc_bits, pi_A_asc, nA, seqM_asc_bits, pi_M_asc, nM))\n res.append(afs_stats_bitarray.FST2(seqM_asc_bits, pi_M_asc, nM, seqJ_asc_bits, pi_J_asc, nJ))\n head = head + 'FST_AEu_ASC\\tFST_AJ_ASC\\tFST_AM_ASC\\tFST_MJ_ASC\\n'\n\n filesummary='real_output.summary'\n filesumm=open(filesummary,'w')\n filesumm.write(head)\n\n out=''\n for g in range(len(res)):\n out=out+str(res[g])+'\\t'\n out=out[:-1]+'\\n'\n\n filesumm.write(out)\n filesumm.close()\n\n return res", "def main_loop(csd_profile, csd_seed, total_ele):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n h = 10.\n\n #TrueCSD\n start_x, end_x, csd_res = [0.,1.,100] \n t_csd_x, true_csd = generate_csd_1D(csd_profile, csd_seed, \n start_x=start_x, \n end_x=end_x, \n res_x=csd_res)\n \n #Electrodes \n ele_res = int(total_ele) \n ele_lims = [0.10, 0.9]\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, h)\n num_ele = ele_pos.shape[0]\n print 'Number of electrodes:', num_ele\n x_array_pots, true_pots = electrode_config(ele_lims, 100, true_csd, t_csd_x, h)\n\n #kCSD estimation\n gdX = 0.01\n x_lims = [0.,1.] 
#CSD estimation place\n tic = time.time() #time it\n k, est_csd, est_pot = do_kcsd(ele_pos, pots, h=h, gdx=gdX,\n xmin=x_lims[0], xmax=x_lims[1], n_src_init=300)\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, test_csd = generate_csd_1D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1], \n res_x=int((x_lims[1]-x_lims[0])/gdX))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title =\"Lambda: %0.2E; R: %0.2f; CV_Error: %0.2E; RMS_Error: %0.2E; Time: %0.2f\" %(k.lambd, k.R, k.cv_error, rms, toc)\n make_plots(title, t_csd_x, true_csd, ele_pos, pots, k.estm_x, est_csd, est_pot, true_pots)\n return", "def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = ['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = ['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. 
Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn", "def FSC2(input_dir, num_reps=50, min_sims=100000, max_ecm=20, calc_CI=False, numcores=1, scratch_mb='200', time_scratch=\"01:50:00\", mem=\"200\", print1=False, overwrite=\"None\", fsc2_path=\"/storage/plzen1/home/holcovam/programs/fsc26_linux64/fsc26\"):\n Data_Files = []\n tpl_files = []\n est_files = []\n CI_Data_Files = []\n shlist = []\n\n if input_dir.endswith(\"/\") is False:\n input_dir += \"/\"\n\n for path in os.listdir(input_dir):\n if os.path.isdir(input_dir + path) and path.startswith(\"FSC2input\"):\n samp_name = path.split(\"_\")[1]\n #folder_name = samp_name\n if samp_name + \"_DSFS.obs\" in os.listdir(input_dir + path):\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + samp_name + \"_DSFS.obs\") as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n Data_Files.append(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\")\n else:\n print(\"Did not find input data file for: \", samp_name)\n if calc_CI == \"True\":\n num_files = 0\n for file in os.listdir(input_dir + path):\n if file.endswith(\"_DSFS.obs\") and file.split(\"_\")[-2].split(\".\")[-1][0:3] == \"rep\" and file != samp_name + \"_DSFS.obs\":\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + file) as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n CI_Data_Files.append(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\")\n num_files += 1\n if len(CI_Data_Files) < 1:\n print(\"Did not find bootstrap replicates for: \", samp_name)\n else:\n print(\"Found \", num_files, \" replicate dsfs files for CI calculation for \", samp_name)\n if path.endswith(\".tpl\"):\n tpl_files.append(path)\n est_files.append(path.split(\".\")[0])\n if len(tpl_files) == 0:\n print(\"Did not find any tpl files!! 
Aborting!!\")\n else:\n if calc_CI == \"True\":\n Data_Files = CI_Data_Files\n for file in Data_Files:\n name = file.split(\"_DSFS\")[0]\n samp_name = name.split(\"/\")[-1]\n folder_name = samp_name [0:11]\n for tpl in tpl_files:\n tpl_name = tpl.split(\".tpl\")[0]\n if os.path.isdir(name + \"_\" + tpl_name) is False or overwrite == \"hard\":\n new_tpl = open(name + \"_\" + tpl_name + \".tpl\", 'w')\n new_data = open(name + \"_\" + tpl_name + \"_DSFS.obs\", 'w')\n\n with open(file, 'r') as data:\n for i, line in enumerate(data):\n if i == 1:\n pop_info = line.strip(\"\\n\").strip(\"\\t\").split(\"\\t\")\n pop_num = int(pop_info[0])\n samp_nums = pop_info[-pop_num:]\n new_data.write(line)\n with open(input_dir + tpl, 'r') as template:\n samp_num_lines = pop_num + 4\n for i, line in enumerate(template):\n if i < samp_num_lines:\n new_tpl.write(line)\n elif i == samp_num_lines:\n for num in samp_nums:\n new_tpl.write(num + \"\\n\")\n elif i >= samp_num_lines + len(samp_nums):\n new_tpl.write(line)\n new_est = open(name + \"_\" + tpl_name + \".est\", 'w')\n try:\n with open(input_dir + tpl_name + \".est\") as est:\n for line in est:\n new_est.write(line)\n except FileNotFoundError:\n print(\"Did not find est file for: \", tpl)\n #folder_name = samp_name ''.join(i for i in s if not i.isdigit())\n shname = name + \"_\" + tpl_name + \".sh\"\n shfile5 = open(shname, 'w')\n shfile5.write('#!/bin/bash -e\\n' +\n '#PBS -N '+samp_name+'\\n' +\n '#PBS -l walltime='+str(time_scratch)+'\\n' +\n '#PBS -l select=1:ncpus='+str(numcores)+':mem='+str(mem)+'mb:scratch_local='+str(scratch_mb)+'mb\\n' +\n '#PBS -m abe\\n' +\n '#PBS -j oe\\n\\n' +\n 'module add python-3.4.1-gcc\\n'+\n 'module add python34-modules-gcc\\n'+\n 'trap \\'clean_scratch\\' TERM EXIT\\n'+\n 'if [ ! -d \"$SCRATCHDIR\" ] ; then echo \"Scratch not created!\" 1>&2; exit 1; fi \\n' +\n 'DATADIR=\"/storage/plzen1/home/holcovam/ScanTools\"\\n' +\n 'cp $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+ \"/\" + samp_name + \"_\" + tpl_name + '* $SCRATCHDIR || exit 1\\n'+\n 'cp '+fsc2_path+' $SCRATCHDIR || exit 1\\n'+\n 'cd $SCRATCHDIR || exit 2\\n' +\n 'echo data loaded at `date`\\n\\n' +\n 'chmod +x fsc26 \\n' +\n #'ls -l \\n' +\n './fsc26 -t ' + samp_name + \"_\" + tpl_name + '.tpl -e ' + samp_name + \"_\" + tpl_name + '.est -n ' + str(min_sims) + ' -u -d -q -L ' + str(max_ecm) + ' -M \\n' + \n 'rm seed.txt \\n'+\n 'rm fsc26\\n'+\n 'rm *DSFS.obs\\n'+\n 'rm *.sh\\n'+\n 'rm *.tpl \\n'+\n 'rm *.est \\n'+\n #'ls -l \\n' +\n 'cp $SCRATCHDIR/*.par $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+' || exit 1\\n'+\n 'rm *.par \\n'+\n 'cp -r $SCRATCHDIR/* $DATADIR/'+input_dir+' || export CLEAN_SCRATCH=false\\n'+\n 'printf \"\\\\nFinished\\\\n\\\\n\"\\n')\n shfile5.close()\n shlist.append(shname)\n\n############IF PROBLEM WITH EXCESS OF NONCONVERGED CHAINS, COPY /home/majda/alpine/fastsimcoal2/afterWPSG/scripts/notConverged.py here ###################\n\n else:\n print(\"Output for \" + samp_name + \"_\" + tpl_name + \" already exists. 
Use hard_overwrite = True to overwrite.\")\n return shlist", "def main():\n cursor = PGCONN.cursor()\n # track our work\n with open(\"myhucs.txt\", \"w\") as fh:\n # Change the working directory to where we have data files\n os.chdir(\"../../data/%s\" % (sys.argv[2],))\n # collect up the GeoJSONs in that directory\n fns = glob.glob(\"smpldef3m_*.json\")\n fns.sort()\n i = 0\n\n for fn in fns:\n # Save our work every 100 HUC12s,\n # so to keep the database transaction\n # at a reasonable size\n if i > 0 and i % 100 == 0:\n PGCONN.commit()\n cursor = PGCONN.cursor()\n df, snapdf = get_data(fn)\n huc12 = process(cursor, fn, df, snapdf)\n fh.write(\"%s\\n\" % (huc12,))\n i += 1\n\n # Commit the database changes\n cursor.close()\n PGCONN.commit()\n LOG.info(\"Complete.\")", "def main(argv):\n args = process_command_line(argv)\n name = job_string(args)\n #That feel when no torison ;_;\n if args.dihed:\n raise Exception(\"Dihed is not supported right now\")\n #SDFS!\n if args.sdf:\n handle_sdf(args)\n #Conversion, pruning\n pybel_mols = convert_to_pybel(args.files, args.format)\n if args.pruneStart:\n pybel_mols = prune(pybel_mols, args.pruneStart)\n print \"Total number of molecules to process is\", len(pybel_mols)\n #Division\n if args.division:\n grouped_pybels = molecule_grouping.main(args.division, pybel_mols)\n else:\n grouped_pybels = [pybel_mols]\n #Run algorithm\n groups_reps, weights = run_smrs(grouped_pybels, args.dihed, args.nonH, args.energy,\n args.alpha, args.delCoordCSV, args.delCoefCSV, name)\n prune_finished = False\n #Pruning representatives\n if args.pruneFinish:\n all_reps = []\n for group in groups_reps:\n all_reps += group\n all_reps = prune(all_reps, args.pruneFinish)\n prune_finished = True\n #Save all groups into one folder\n folder_name = 'rep_' + name\n if args.folder:\n #folder creation\n while True:\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n break\n else:\n folder_name = folder_name + 'c'\n #copying\n if prune_finished:\n for mol in all_reps:\n shutil.copy(mol.title, os.getcwd() + \"/\" + folder_name)\n else:\n for group in groups_reps:\n for mol in group:\n shutil.copy(mol.title, os.getcwd() + \"/\" + folder_name)\n print \"Coeficient matrix results\"\n for i in range(len(grouped_pybels)):\n for j in range(len(grouped_pybels[i])):\n print grouped_pybels[i][j].title, weights[i][j]\n print \"\"\n print \"Rep mols\"\n for group in groups_reps:\n for mol in group:\n print mol.title\n return groups_reps", "def update_compdatabase():\n for comp_group in comp_entry:\n#\n#--- read the last set of the input data and find the last entry \n#\n past = house_keeping + comp_group + '_past'\n past = mcf.read_data_file(past)\n\n last = past[-1]\n#\n#--- find today's data entry\n#\n cmd = 'ls /data/mta_www/mp_reports/*/' + comp_group + '/data/mta*fits* >' + zspace\n os.system(cmd)\n current = mcf.read_data_file(zspace)\n\n cmd = 'mv '+ zspace + ' ' + house_keeping + comp_group + '_past'\n os.system(cmd)\n#\n#--- find the data which are not read\n#\n new_fits = []\n chk = 0\n for ent in current:\n if chk == 0:\n if ent == last:\n chk = 1\n continue\n new_fits.append(ent)\n#\n#--- uppend the data to the local fits data files\n#\n for fits in new_fits:\n [cols, tbdata] = ecf.read_fits_file(fits)\n\n time = tbdata['time']\n\n for col in cols:\n#\n#--- ignore columns with \"ST_\" (standard dev) and time\n#\n if col.lower() == 'time':\n continue\n\n mc = re.search('st_', col.lower())\n if mc is not None:\n continue\n\n mdata = tbdata[col]\n cdata = [time, mdata]\n 
ocols = ['time', col.lower()]\n\n ofits = out_dir + col.lower()+ '_full_data.fits'\n if os.path.isfile(ofits):\n update_fits_file(ofits, ocols, cdata)\n else:\n create_fits_file(ofits, ocols, cdata)", "def dataset_fillCohnKanade( dsFolder, ckFolder, ckEmoFolder, config, vperc=0.3, vseed=0):\n\n subjects=[ x for x in os.listdir(ckFolder) if isdir(join(ckFolder, x)) ]\n print \"INFO: %d subjects found in CK+ database\" % len(subjects)\n\n for subj in subjects:\n print \"INFO: Processing subject %s \" % subj\n \n labelFolders=[x for x in os.listdir(join(ckEmoFolder, subj)) if isdir(join(ckEmoFolder, join(subj, x)))]\n imageFolders=[x for x in os.listdir(join(ckFolder, subj)) if isdir(join(ckEmoFolder, join(subj, x)))]\n\n shots=[x for x in imageFolders if x in labelFolders]\n for s in shots:\n print \"INFO: Processing shot %s \" % s\n \n pics=[x for x in os.listdir( join(ckFolder, join(subj,s)) ) if isfile(join(ckFolder, join(subj, join(s, x))))]\n pics.sort()\n labels=[x for x in os.listdir( join(ckEmoFolder, join(subj, s)) ) if isfile( join(ckEmoFolder, join(subj, join(s, x)) )) ]\n if len(labels)<1 or len(pics)<1:\n # label forlder could contain no file at all, in this case skip the current shot or mark it as neutral?\n print \"WARN: subject %s shot %s has #%d emo labels and #%d pictures, (skip:incomplete)\" %( subj, s, len(labels), len(pics))\n continue\n emo=None\n with open(join(ckEmoFolder, join(subj, join(s, labels[0]))), \"r\") as f:\n buf=f.read()\n if len(buf)==0:\n print \"WARN: subject %s shot %s has void emo label '%s', (skip:noemo)\" % (subj, s, join(ckEmoFolder, join(subj, join(s, labels[0]))))\n # A label file could be void, in this case skip the current shot\n continue\n try:\n emo=config['CLASSES'][int(float(strip(buf)))]\n except:\n print \"ERR: cannot parse emotional label for subject %s shot %s (skip:unknown_emo)\" % (subj, s)\n continue\n\n # Last picture is the final emotion (most intense), first picture is neutral\n to_copy = [(pics[-1], emo), (pics[0], config['CLASSES'][0])]\n\n for pic, emo in to_copy:\n print \"INFO: Picture '%s' has been marked as %s\" % (pic, emo)\n orig = join(ckFolder, join(subj, join(s, pic)))\n IMAGES_FOLDER = config['TRAINING_IMAGES']\n if random.random() <= vperc:\n IMAGES_FOLDER = config['VALIDATION_IMAGES']\n dest = join(dsFolder, join(IMAGES_FOLDER, join(emo, pic)))\n try:\n shutil.copy(orig, dest)\n except:\n print \"ERR: cannot copy image '%s' to dataset '%s' \"%(orig, dest)\n continue", "def run_calcs(pattern: str, time='1d', memory='2GB', outfile='outfile'):\n cwd = os.getcwd()\n\n time = time.lower()\n if time[-1] == 'd':\n time = int(time[:-1]) * 24 * 60\n elif time[-1] == 'h':\n time = int(time[:-1]) * 60\n elif time[-1] == 'm':\n time = int(time[:-1])\n else:\n raise ValueError('Time must be given in minutes, hours, or days (e.g. 1440m, 24h, 1d).')\n\n memory = memory.upper()\n if memory[-2:] not in ['MB', 'GB']:\n raise ValueError('Memory must be given as a MB or GB (e.g. 
1024MB, 1GB)')\n\n for filename in glob.glob(pattern):\n if os.path.commonpath([cwd, os.path.abspath(filename)]) != cwd:\n continue\n filename = os.path.abspath(filename)[len(cwd)+1:]\n\n _, _, orbital, *wfn = filename.split(os.sep)\n if os.path.isdir(filename):\n os.chdir(filename)\n else:\n dirname, filename = os.path.split(filename)\n os.chdir(dirname)\n submit_job = False\n\n if orbital == 'mo' and os.path.splitext(filename)[1] == '.com':\n # write script (because sbatch only takes one command)\n with open('hf_sp.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write(f'g16 {filename}\\n')\n command = ['hf_sp.sh']\n submit_job = True\n elif orbital == 'mo' and os.path.splitext(filename)[1] == '.chk':\n command = ['formchk', filename]\n submit_job = False\n elif orbital == 'mo' and os.path.splitext(filename)[1] == '.fchk':\n command = [os.environ.get('HORTONPYTHON'),\n '/project/def-ayers/kimt33/fanpy/scripts/horton_gaussian_fchk.py',\n 'hf_energies.npy', 'oneint.npy', 'twoint.npy', 'fchk_file', filename]\n submit_job = False\n elif len(wfn) == 2:\n if os.path.splitext(filename)[1] == '.py':\n with open('results.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write('cwd=$PWD\\n')\n f.write('for i in */; do\\n')\n f.write(' cd $i\\n')\n f.write(' python ../calculate.py > results.out\\n')\n f.write(' cd $cwd\\n')\n f.write('done\\n')\n else:\n with open('results.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write(f'python ../calculate.py\\n')\n command = ['results.sh']\n submit_job = True\n\n # print(' '.join(['sbatch', f'--time={time}', f'--output={outfile}', f'--mem={memory}',\n # '--account=rrg-ayers-ab', command]))\n if submit_job:\n subprocess.run(['sbatch', f'--time={time}', f'--output={outfile}', f'--mem={memory}',\n '--account=rrg-ayers-ab', *command])\n else:\n subprocess.run(command)\n\n # change directory\n os.chdir(cwd)", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def main():\n # the url for african daily and global daily\n african_dialy_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p25/\"\n global_daily_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_daily/tifs/p25/\"\n\n\n each_year_list = GetRasterYears(url=african_dialy_url)\n new_path = makenewdir(each_year_list)\n years_new_list = fecthrasterurl(url=african_dialy_url)\n downloadwithwget(each_year_list, years_new_list, new_path)", "def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i 
= 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1", "def test_run_full(mk_tmp_dirs):\n tmp_current_path, tmp_data_path, tmp_config_path = mk_tmp_dirs\n\n cfg_dir = path.join(tmp_data_path, 'cfgs')\n collect_pipeline_cfgs(cfg_dir)\n\n asn_path = path.join(DATAPATH, 'mosaic_long_asn.json')\n args = [\n path.join(cfg_dir, 'calwebb_image3.cfg'),\n asn_path,\n ]\n\n Step.from_cmdline(args)\n\n # Check for the CRF files\n with open(asn_path) as fh:\n asn = load_asn(fh)\n expfilenames = [\n path.split(path.splitext(member['expname'])[0])[1]\n for member in asn['products'][0]['members']\n ]\n crffilenames = []\n for expfilename in expfilenames:\n name = remove_suffix(path.splitext(expfilename)[0])[0]\n crffilenames.append(name + '_a3001_crf.fits')\n for crffilename in crffilenames:\n assert path.isfile(crffilename)\n\n # Check for the level3 products\n product_name = asn['products'][0]['name']\n assert path.isfile(product_name + '_cat.ecsv')\n assert path.isfile(product_name + '_i2d.fits')", "def execute():\r\n arcpy.AddMessage(\"START BCA Processing\")\r\n arcpy.env.workspace = config.temp_data_gdb\r\n arcpy.env.overwriteOutput = True\r\n 
sys.path.append(config.notif_system_script_folder)\r\n\r\n # Other Variables\r\n arcpy.AddMessage(\"Import toolbox\")\r\n arcpy.ImportToolbox(config.notif_toolbox)\r\n REGEX_FOR_INVALID_CHARS = re.compile(r'[^0-9a-zA-Z]+')\r\n todayDate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n logFile = file(\r\n config.report_processing_log + \"\\\\\" + todayDate + \"_NotificationSystemLog\" + \".txt\", \"a\")\r\n\r\n\r\n # get all unzipped files uploaded to shared folder\r\n configfiles = [os.path.join(dirpath, f)\r\n for dirpath, dirnames, files in os.walk(config.SharedFolder)\r\n for f in files if f.endswith('.csv') or f.endswith('.xls') or f.endswith('.xlsx') or f.endswith('.XLS')]\r\n\r\n correct_config_files = [f for f in configfiles if \"\\BCAWeeklyPermitReport\\\\\" in f]\r\n\r\n # PREPARE workspace\r\n arcpy.AddMessage(\"Preparing workspace...\")\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExists = True\r\n break\r\n if PermitDateExists and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExistsLog = file(\r\n config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] +\r\n \" file's Permit Date already exists\" + \".log\",\r\n \"a\")\r\n PermitDateExistsLog.write(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n else:\r\n\r\n # 00. Creation of geodatabases that will serve as workspaces\r\n logFile.writelines(\"00 Creation of temp gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n if arcpy.Exists(config.TempDataGDB):\r\n arcpy.Delete_management(config.TempDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n\r\n if arcpy.Exists(config.SDEDataGDB):\r\n arcpy.Delete_management(config.SDEDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n\r\n if arcpy.Exists(config.CurrentMukimConstructDataGDB):\r\n arcpy.Delete_management(config.CurrentMukimConstructDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n\r\n logFile.writelines(\"00 Creation of temp gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 01. 
Import the base data\r\n logFile.writelines(\"01 Import of base data starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructByProjSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT_BYPROJ\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.DepotSource, config.SDEDataGDB, \"DepotBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.CatchmentSource, config.SDEDataGDB, \"CatchmentBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.LandlotSource, config.TempDataGDB, \"Land_lot\", \"\", \"\", \"\")\r\n # Calculate the lot key without letter\r\n arcpy.AddField_management(config.LandLot, \"Lotkey_wo_letter\", \"TEXT\", \"\", \"\", \"10\", \"\", \"NULLABLE\", \"NON_REQUIRED\",\r\n \"\")\r\n arcpy.CalculateField_management(config.LandLot, \"Lotkey_wo_letter\", \"!lot_key![:10]\", \"PYTHON\", \"\")\r\n\r\n logFile.writelines(\"01 Import of base data ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n\r\n # START THE LOOP TO PROCESS ALL THE FILES\r\n clcounter = 0\r\n\r\n if len(correct_config_files) == 0:\r\n logFile.writelines(\"No BCA report to process at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n arcpy.AddMessage(\"Processing files...\")\r\n for BCAreport in configfiles:\r\n\r\n clcounter += 1\r\n arcpy.AddMessage(BCAreport)\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n # CHEKC FILE DATE EXISTS\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in input_file_name.upper():\r\n PermitDateExists = True\r\n break\r\n\r\n HEADERVALID = True\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] == 'Error_Message':\r\n HEADERVALID = True\r\n elif sh.row_values(r)[colcount] == 'Project Ref No' or sh.row_values(r)[colcount] == 'Project_Ref_No':\r\n HEADERVALID = True\r\n else:\r\n PermitDateExistsLog = file(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[\r\n 0] + \" file's header format is not acceptable for processing\" + \".log\", \"a\")\r\n PermitDateExistsLog.write(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n HEADERVALID = False\r\n break\r\n\r\n if not PermitDateExists and HEADERVALID:\r\n logFile.writelines(\"Starts processing \" + BCAreport + \" at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"NO\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n # 02. 
Import the BCA report to a geodatabase table\r\n logFile.writelines(\"02 Import of table to gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n if arcpy.Exists(BCAreport[:-5] + '_err' + '.csv'):\r\n # rename old error report\r\n os.remove(BCAreport[:-5] + '_err' + '.csv')\r\n else:\r\n result = \"Error file does not exist\"\r\n if BCAreport.endswith('.xls') or BCAreport.endswith('.xlsx') or BCAreport.endswith('.XLS'):\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n fldlist = arcpy.ListFields(config.BCAReportGDBTable)\r\n fldlist.pop(0)\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] != 'Error_Message':\r\n colcount = 0\r\n else:\r\n colcount = 1\r\n break\r\n for r in range(sh.nrows):\r\n colcounter = colcount\r\n if r > 0:\r\n new_row_out = rows_out.newRow()\r\n for efld in fldlist:\r\n if efld.name <> 'OBJECTID' and efld.name <> 'ConcatFields':\r\n new_row_out.setValue(efld.name, sh.row_values(r)[colcounter])\r\n colcounter += 1\r\n\r\n logFile.writelines(\"Inserting: \" + str(new_row_out) + \"\\n\")\r\n rows_out.insertRow(new_row_out)\r\n del rows_out, new_row_out\r\n\r\n elif BCAreport.endswith('.csv'):\r\n\r\n BCAreportread = csv.DictReader(open(BCAreport, 'rb'), delimiter=',', quotechar='\"')\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n for attribute in BCAreportread:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Ref_No = attribute['Project_Ref_No']\r\n new_row_out.Project_Title = attribute['Project_Title']\r\n new_row_out.House_Blk_No = attribute['House_Blk_No']\r\n new_row_out.Road_Name = attribute['Road_Name']\r\n new_row_out.Level_No = attribute['Level_No']\r\n new_row_out.Unit_No = attribute['Unit_No']\r\n new_row_out.Building_Name = attribute['Building_Name']\r\n new_row_out.Postal_Code = attribute['Postal_Code']\r\n new_row_out.Project_Mukim_nos = attribute['Project_Mukim_nos']\r\n new_row_out.Project_Lot_nos = attribute['Project_Lot_nos']\r\n new_row_out.Permit_Type_of_Work = attribute['Permit_Type_of_Work']\r\n new_row_out.Type_of_Work = attribute['Type_of_Work']\r\n new_row_out.Owner_s_name = attribute['Owners_name']\r\n new_row_out.Owner_s_firm_name = attribute['Owners_firm_name']\r\n new_row_out.Owner_s_address = attribute['Owners_address']\r\n new_row_out.Owner_s_Tel_No = attribute['Owners_Tel_No']\r\n new_row_out.Owner_s_Email_address = attribute['Owners_Email_address']\r\n new_row_out.Builder_s_name = attribute['Builders_name']\r\n new_row_out.Builder_s_firm_name = attribute['Builders_firm_name']\r\n new_row_out.Builder_s_address = attribute['Builders_address']\r\n new_row_out.Builder_s_Tel_No = attribute['Builders_Tel_No']\r\n new_row_out.Builder_s_email_address = attribute['Builders_email_address']\r\n new_row_out.PE_s_name = attribute['PEs_name']\r\n new_row_out.PE_s_firm_name = attribute['PEs_firm_name']\r\n new_row_out.PE_s_address = attribute['PEs_address']\r\n new_row_out.PE_s_Tel_No = attribute['PEs_Tel_No']\r\n new_row_out.PE_s_Email_address = attribute['PEs_Email_address']\r\n 
new_row_out.Architect_s_name = attribute['Architects_name']\r\n new_row_out.Architect_s_firm_name = attribute['Architects_firm_name']\r\n new_row_out.Architect_s_address = attribute['Architects_address']\r\n new_row_out.Architect_s_Tel_No = attribute['Architects_Tel_No']\r\n new_row_out.Architect_s_Email_address = attribute['Architects_Email_address']\r\n new_row_out.Project_Cost = attribute['Project_Cost']\r\n new_row_out.Project_Duration = attribute['Project_Duration']\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = attribute['Approval_Date']\r\n rows_out.insertRow(new_row_out)\r\n if new_row_out:\r\n del new_row_out\r\n if rows_out:\r\n del rows_out\r\n\r\n except:\r\n log_error(\"Error in 02 Import of table to gdb: \", logFile)\r\n logFile.writelines(\"02 Import of table to gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 03. Remove spaces in key fields for the concatenation\r\n logFile.writelines(\"03 Removing of spaces starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpace = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n\r\n for row in rowsSpace:\r\n ProjRef = row.Project_Ref_No.strip()\r\n ProjMukim = row.Project_Mukim_nos.strip()\r\n ProjLot = row.Project_Lot_nos.strip()\r\n BuilderN = row.Builder_s_name.strip()\r\n row.Project_Ref_No = ProjRef\r\n row.Project_Mukim_nos = ProjMukim\r\n row.Project_Lot_nos = ProjLot\r\n row.Builder_s_name = BuilderN\r\n rowsSpace.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpace:\r\n del rowsSpace\r\n except:\r\n log_error(\"Error in 03 Removing of spaces: \", logFile)\r\n logFile.writelines(\"03 Removing of spaces ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 04. Concatenate Project_Ref_No, Project_Mukim_nos, Project_Lot_nos, Builder_s_name\r\n logFile.writelines(\"04 Concatenate the three fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n for row in rows:\r\n expression = str(row.Project_Ref_No) + \"-\" + str(row.Project_Mukim_nos) + \"-\" + str(\r\n row.Project_Lot_nos) + \"-\" + str(row.Builder_s_name)\r\n row.ConcatFields = expression\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n\r\n except:\r\n log_error(\"Error in 04 Concatenate the three fields: \", logFile)\r\n logFile.writelines(\"04 Concatenate the three fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 05. Create temporary tables for Unique and Duplicate records\r\n logFile.writelines(\"05 Create temporary tables starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Uniquerows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Uniquerows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Duplicaterows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Duplicaterows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n except:\r\n log_error(\"Error in 05 Create temporary tables: \", logFile)\r\n logFile.writelines(\"05 Create temporary tables ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 06. 
Separate unique and duplicate records\r\n logFile.writelines(\"06 Separate unique and duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n print \"Start step 06\"\r\n rows_inCB02 = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n rows_outUnique = arcpy.InsertCursor(config.UniqueRecords)\r\n # print rows_outUnique\r\n rows_outDuplicate = arcpy.InsertCursor(config.DuplicateRecords)\r\n\r\n rows_unique = []\r\n rows_duplicates = []\r\n for row in rows_inCB02:\r\n if row.ConcatFields not in rows_unique:\r\n rows_unique = rows_unique + [row.ConcatFields]\r\n else:\r\n rows_duplicates = rows_duplicates + [row.ConcatFields]\r\n\r\n print \"Start step 06 1\"\r\n for item in rows_unique:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outUnique.newRow()\r\n newrow.Concat = item\r\n # print newrow\r\n rows_outUnique.insertRow(newrow)\r\n\r\n print \"Start step 06 2\"\r\n for item in rows_duplicates:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outDuplicate.newRow()\r\n newrow.Concat = item\r\n rows_outDuplicate.insertRow(newrow)\r\n\r\n print \"Start step 06 3\"\r\n\r\n if rows_inCB02:\r\n del rows_inCB02\r\n if rows_outUnique:\r\n del rows_outUnique\r\n if rows_outDuplicate:\r\n del rows_outDuplicate\r\n if row:\r\n del row\r\n except:\r\n log_error(\"Error in 06 Separate unique and duplicate rows: \", logFile)\r\n logFile.writelines(\"06 Separate unique and duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 07. Get the rest of the fields for Uniquerows table\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB\r\n arcpy.AddMessage(\"Starting toolbox JoinUniqueRestofFields\")\r\n\r\n try:\r\n arcpy.JoinUniqueRestofFields()\r\n except:\r\n log_error(\"Error in 07 Get the rest of the fields for unique rows: \", logFile)\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 08. Get the rest of the fields for Duplicaterows table\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"START toolbox JoinDuplicateRestofFields\")\r\n try:\r\n arcpy.JoinDuplicateRestofFields()\r\n\r\n except:\r\n log_error(\"Error in 08 Get the rest of the fields for duplicate rows: \", logFile)\r\n\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 09. 
Log duplicate records\r\n logFile.writelines(\"09 Log duplicate records starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Logging duplicate records\")\r\n try:\r\n # Initialize the error log\r\n wbk = xlwt.Workbook()\r\n sheet = wbk.add_sheet('Book 1')\r\n row_count = 0\r\n col_count = 0\r\n header = ['Error_Message', 'Project_Ref_No', 'Project_Title', 'House_Blk_No', 'Road_Name', 'Level_No',\r\n 'Unit_No', 'Building_Name', 'Postal_Code', 'Project_Mukim_nos', 'Project_Lot_nos',\r\n 'Permit_Type_of_Work', 'Type_of_Work', 'Owners_name', 'Owners_firm_name', 'Owners_address',\r\n 'Owners_Tel_No', 'Owners_Email_address', 'Builders_name', 'Builders_firm_name',\r\n 'Builders_address', 'Builders_Tel_No', 'Builders_email_address', 'PEs_name', 'PEs_firm_name',\r\n 'PEs_address', 'PEs_Tel_No', 'PEs_Email_address', 'Architects_name', 'Architects_firm_name',\r\n 'Architects_address', 'Architects_Tel_No', 'Architects_Email_address', 'Project_Cost',\r\n 'Project_Duration', 'Approval_Date']\r\n for fieldname in header:\r\n sheet.write(row_count, col_count, fieldname)\r\n col_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n # Log duplicate records\r\n rows = arcpy.SearchCursor(config.DuplicateRows)\r\n\r\n row_count = 1\r\n col_count = 0\r\n row = None\r\n for row in rows:\r\n message = ['Duplicate record in the BCA report', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 09 Log duplicate records: \", logFile)\r\n\r\n logFile.writelines(\"09 Log duplicate records ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 10. 
Split rows based on Mukim numbers\r\n logFile.writelines(\"10 Splitting of rows based on mukim starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.SplittedMukimRows):\r\n arcpy.Delete_management(config.SplittedMukimRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n\r\n if arcpy.Exists(config.SplittedProjLotRows):\r\n arcpy.Delete_management(config.SplittedProjLotRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n\r\n rows_in = arcpy.SearchCursor(config.UniqueRows)\r\n rows_out = arcpy.InsertCursor(config.SplittedMukimRows)\r\n\r\n for row in rows_in:\r\n list_mukim_nos = row.Project_Mukim_nos.split(\",\")\r\n for proj_mukim_nos_id in list_mukim_nos:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Mukim_nos = proj_mukim_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.Project_Mukim_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Lot_nos = row.Project_Lot_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out.insertRow(new_row_out)\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in:\r\n del rows_in\r\n if rows_out:\r\n del rows_out\r\n except:\r\n log_error(\"Error in 10 Splitting of rows based on mukim: \", logFile)\r\n\r\n logFile.writelines(\"10 Splitting of rows based on mukim ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 11.Split rows based on Project lot numbers\r\n arcpy.AddMessage(\"Splitting rows 
based on project lots\")\r\n\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows_in03 = arcpy.SearchCursor(config.SplittedMukimRows)\r\n rows_out04 = arcpy.InsertCursor(config.SplittedProjLotRows)\r\n\r\n for row in rows_in03:\r\n list_proj_lot_nos = row.Project_Lot_nos.split(\",\")\r\n print list_proj_lot_nos\r\n for proj_lot_nos_id in list_proj_lot_nos:\r\n print proj_lot_nos_id\r\n new_row_out = rows_out04.newRow()\r\n new_row_out.Project_Lot_nos = proj_lot_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.PROJECTMUKIM_RAW\r\n new_row_out.PROJECTLOT_RAW = row.Project_Lot_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Mukim_nos = row.Project_Mukim_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out04.insertRow(new_row_out)\r\n\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in03:\r\n del rows_in03\r\n if rows_out04:\r\n del rows_out04\r\n # print int(arcpy.GetCount_management(SplittedProjLotRows).getOutput(0))\r\n except:\r\n log_error(\"Error in 11 Splitting of rows based on project lot: \", logFile)\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 12. 
Remove spaces in Mukim and Project lot values\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Cleaning project lots\")\r\n try:\r\n\r\n rowsSpaces = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.Project_Lot_nos.strip()\r\n mukim_no_spaces = row.Project_Mukim_nos.strip()\r\n row.Project_Lot_nos = lot_no_spaces\r\n row.Project_Mukim_nos = mukim_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 12 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 13. Log empty Mukimlot or date fields\r\n logFile.writelines(\r\n \"13 Log empty mukim and project lot nos starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsEmpty = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsEmpty:\r\n message = ['Missing Project lot or Mukim numbers', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n message2 = ['Missing Project duration or Approval date', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name,\r\n row.Architect_s_firm_name, row.Architect_s_address, row.Architect_s_Tel_No,\r\n row.Architect_s_Email_address, row.Project_Cost, row.Project_Duration,\r\n row.Approval_Date_DD_MM_YYYY_]\r\n if row.Project_Mukim_nos is None or (len(row.Project_Mukim_nos) < 4):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n elif row.Project_Lot_nos is None or (len(row.Project_Lot_nos) == 0):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n if row.Project_Duration is None or (len(row.Project_Duration) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n\r\n elif row.Approval_Date_DD_MM_YYYY_ is None or 
(len(row.Approval_Date_DD_MM_YYYY_) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsEmpty:\r\n del rowsEmpty\r\n except:\r\n log_error(\"Error in 13 Log for empty mukim and project lot nos: \", logFile)\r\n logFile.writelines(\"13 Log empty mukim and project lot nos ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 14. Error log for those with bad values\r\n arcpy.AddMessage(\"14 Logging bad values\")\r\n logFile.writelines(\"14 Log if bad values exist starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsBadValues = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsBadValues:\r\n message = ['Mukim or Project lot numbers have bad values', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n if len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Mukim_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(uptodigit(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n if row:\r\n del row\r\n if rowsBadValues:\r\n del rowsBadValues\r\n except:\r\n log_error(\"Error in 14 Log if bad values exist: \", logFile)\r\n logFile.writelines(\"14 Log if bad values exist ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 15. Add zeros for Project Lot numbers\r\n logFile.writelines(\"15 Add zeros starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsZeros = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n letters = string.ascii_letters\r\n for row in rowsZeros:\r\n letter_count = len(filter(functools.partial(operator.contains, letters), row.Project_Lot_nos))\r\n filled_string = row.Project_Lot_nos.zfill(5 + letter_count)\r\n row.Project_Lot_nos = filled_string\r\n rowsZeros.updateRow(row)\r\n if row:\r\n del row\r\n if rowsZeros:\r\n del rowsZeros\r\n except:\r\n log_error(\"Error in 15 Add zeros: \", logFile)\r\n logFile.writelines(\"15 Add zeros ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 16. 
Add and populate fields Mukim_Lot_No, Mukimlot_wo_letter, and Permit_date\r\n logFile.writelines(\"16 Add and populate fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsPop = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n for row in rowsPop:\r\n expression = str(row.Project_Mukim_nos) + \"-\" + str(row.Project_Lot_nos)\r\n row.Mukim_Lot_No = expression\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.Permit_date = permit_date\r\n rowsPop.updateRow(row)\r\n if row:\r\n del row\r\n if rowsPop:\r\n del rowsPop\r\n # Calculate Mukimlot_wo_letter\r\n arcpy.CalculateField_management(config.SplittedProjLotRows, \"Mukimlot_wo_letter\", \"!Mukim_Lot_No![:10]\",\r\n \"PYTHON_9.3\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 16 Add and populate fields: \", logFile)\r\n logFile.writelines(\"16 Add and populate fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 17.Match mukim lot and land lot\r\n logFile.writelines(\"17 Match mukim lot with landlot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.MatchMukimLandLot()\r\n except:\r\n log_error(\"Error in 17 Match mukim lot with landlot: \", logFile)\r\n logFile.writelines(\"17 Match mukim lot with landlot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 18.Get unmatched mukim lot with land lot\r\n logFile.writelines(\"18 Get unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"18 Get unmatched mukim lot\")\r\n try:\r\n arcpy.GetUnmatchedMukimLot()\r\n\r\n except:\r\n log_error(\"Error in 18 Get unmatched mukim lot: \", logFile)\r\n\r\n logFile.writelines(\"18 Get unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 19. 
Log errors for unmatched mukim lots\r\n logFile.writelines(\"19 Log unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsUnmatched = arcpy.SearchCursor(config.UnmatchedMukimLot)\r\n row = None\r\n\r\n for row in rowsUnmatched:\r\n message = ['Unmatched mukim lot with the land lot', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsUnmatched:\r\n del rowsUnmatched\r\n\r\n with xlrd.open_workbook(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\") as wb:\r\n sh = wb.sheet_by_index(0)\r\n if sh.nrows == 1:\r\n os.remove(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n except arcpy.ExecuteError:\r\n log_error(\"Error in 19 Log unmatched mukim lot: \", logFile)\r\n logFile.writelines(\"19 Log unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 20. Prepare the table for MukimConstruct matching (add required fields)\r\n logFile.writelines(\"20 Add fields to be used for matching starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n if arcpy.Exists(config.MUKIMCONSTRUCTImport):\r\n arcpy.Delete_management(config.MUKIMCONSTRUCTImport)\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n else:\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n\r\n arcpy.AddField_management(config.MatchedMukimLot, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCTImport, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS2\", \"Double\", \"\", \"\", \"\")\r\n except:\r\n log_error(\"Error in 20 Add fields to be used for matching: \", logFile)\r\n logFile.writelines(\"20 Add fields to be used for matching ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 21. 
Calculate Project Duration as months\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsProjDur = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsProjDur:\r\n durationstr = row.PROJ_DURATION_MTHS\r\n if \"Month\" in row.PROJ_DURATION_MTHS:\r\n durationintmth = int(durationstr.split(' ')[0])\r\n row.PROJ_DURATION_MTHS2 = durationintmth\r\n elif \"Year\" in row.PROJ_DURATION_MTHS:\r\n durationintyr = int(durationstr.split(' ')[0]) * 12\r\n row.PROJ_DURATION_MTHS2 = durationintyr\r\n rowsProjDur.updateRow(row)\r\n if rowsProjDur:\r\n del rowsProjDur\r\n if row:\r\n del row\r\n\r\n arcpy.DeleteField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"Double\")\r\n arcpy.CalculateField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"[PROJ_DURATION_MTHS2]\")\r\n except:\r\n log_error(\"Error in 21 Calculate PROJ_DURATION as months: \", logFile)\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 22. Concatenate 4 fields to be used in checking if mukimlot already exists in MUKIMCONSTRUCT\r\n logFile.writelines(\"22 Concatenate 4 fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsConcat1 = arcpy.UpdateCursor(config.MUKIMCONSTRUCTImport)\r\n\r\n for row in rowsConcat1:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat1.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat1:\r\n del rowsConcat1\r\n\r\n rowsConcat2 = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsConcat2:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat2.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat2:\r\n del rowsConcat2\r\n except:\r\n log_error(\"Error in 22 Concatenate 4 fields: \", logFile)\r\n logFile.writelines(\"22 Concatenate 4 fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 23.Match mukim lot with mukim construct\r\n logFile.writelines(\"23 Match mukimlot with mukim construct at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB # \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n try:\r\n arcpy.MatchedMukimlotMukimConstruct()\r\n except:\r\n log_error(\"Error in 23 Match mukimlot with mukim construct: \", logFile)\r\n logFile.writelines(\"23 Match mukimlot with mukim construct ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 24.Copy raw values to project lot and project mukim columns and delete the 2 fields\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on original values starts at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsRaw = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsRaw:\r\n row.PROJ_MUKIM_NOS = row.PROJECTMUKIM_RAW\r\n row.PROJ_LOT_NOS = row.PROJECTLOT_RAW\r\n rowsRaw.updateRow(row)\r\n if row:\r\n del row\r\n if rowsRaw:\r\n del rowsRaw\r\n except:\r\n log_error(\"Error in 24 Recalculate projlot and projmukim based on original values:\", logFile)\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on 
original values ends at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n # 25. Export Cleaned BCA Permit report for CWD\r\n logFile.writelines(\r\n \"25 Export of Cleaned BCA Permit report starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # Initialize the file\r\n CleanedBCAPermitReport = xlwt.Workbook()\r\n book = CleanedBCAPermitReport.add_sheet('Book 1')\r\n countrow = 0\r\n countcol = 0\r\n fields = ['Project Ref No', 'Project Title', 'House Blk No', 'Road Name', 'Level No', 'Unit No',\r\n 'Building Name', 'Postal Code', 'Project Mukim nos', 'Project Lot nos', 'Permit Type of Work',\r\n 'Type of Work', \"Owner's name\", \"Owner's firm name\", \"Owner's address\", \"Owner's Tel No\",\r\n \"Owner's Email address\", \"Builder's name\", \"Builder's firm name\", \"Builder's address\",\r\n \"Builder's Tel No\", \"Builder's email address\", \"PE's name\", \"PE's firm name\", \"PE's address\",\r\n \"PE's Tel No\", \"PE's Email address\", \"Architect's name\", \"Architect's firm name\",\r\n \"Architect's address\", \"Architect's Tel No\", \"Architect's Email address\", 'Project Cost',\r\n 'Project Duration', 'Approval Date(DD/MM/YYYY)']\r\n for fieldname in fields:\r\n book.write(countrow, countcol, fieldname)\r\n countcol += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n\r\n # Copy the data to Excel File\r\n data = arcpy.SearchCursor(config.MatchedMukimLot)\r\n\r\n countrow = 1\r\n countcol = 0\r\n for row in data:\r\n message = [row.PROJ_REF_NO, row.PROJ_TITLE, row.HOUSE_BLK_NO, row.ROAD_NAME, row.LEVEL_NO,\r\n row.UNIT_NO, row.BUILDING_NAME, row.POSTAL_CODE, row.PROJ_MUKIM_NOS, row.PROJ_LOT_NOS,\r\n row.PERMIT_WORK_TYPE, row.WORK_TYPE, row.OWNER_NAME, row.OWNER_FIRM_NAME, row.OWNER_ADDR,\r\n row.OWNER_TEL, row.OWNER_EMAIL, row.BUILDER_NAME, row.BUILDER_FIRM_NAME,\r\n row.BUILDER_ADDR, row.BUILDER_TEL, row.BUILDER_EMAIL, row.PE_NAME, row.PE_FIRM_NAME,\r\n row.PE_ADDR, row.PE_TEL, row.PE_EMAIL, row.ARCHITECT_NAME, row.ARCHITECT_FIRM_NAME,\r\n row.ARCHITECT_ADDR, row.ARCHITECT_TEL, row.ARCHITECT_EMAIL, row.PROJ_COST,\r\n row.PROJ_DURATION_MTHS, row.PROJ_APPROVAL_DATE]\r\n countcol = 0\r\n for element in message:\r\n book.write(countrow, countcol, element)\r\n countcol += 1\r\n countrow += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n if row:\r\n del row\r\n if data:\r\n del data\r\n except:\r\n log_error(\"Error in 25 Export of Cleaned BCA Permit Report: Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"25 Export of Cleaned BCA Permit Report ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 26. Catchment calculation\r\n arcpy.env.workspace = config.TempDataGDB\r\n logFile.writelines(\"26 Catchment calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.CatchmentCalculation()\r\n except:\r\n log_error(\"Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"26 Catchment calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 27. Depot calculation\r\n logFile.writelines(\"27 Depot calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.DepotCalculation()\r\n except:\r\n log_error(\"Error in 27 Depot calculation: \", logFile)\r\n logFile.writelines(\"27 Depot calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 28. 
Re-add date fields and populate\r\n logFile.writelines(\"28 Re-add date fields and populate starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PERMIT_DATE\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_APPROVAL_DATE2\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_END_DATE\", \"Date\")\r\n\r\n rows = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows:\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.PERMIT_DATE = permit_date\r\n row.PROJ_APPROVAL_DATE2 = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE, '%d/%m/%Y')\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 28 Re-add fields and populate: \", logFile)\r\n logFile.writelines(\"28 Re-add fields and populate ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 29. Calculate the end date field\r\n logFile.writelines(\"29 Calculate the end date field starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n\r\n rowsEndDate = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsEndDate:\r\n sourcedate = row.PROJ_APPROVAL_DATE2\r\n # sourcedate = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE2 , '%d/%m/%Y')\r\n months = int(row.PROJ_DURATION_MTHS)\r\n d = add_months(sourcedate, months)\r\n row.PROJ_END_DATE = d\r\n rowsEndDate.updateRow(row)\r\n if row:\r\n del row\r\n if rowsEndDate:\r\n del rowsEndDate\r\n except:\r\n log_error(\"Error in 29 Calculate the end date field: \", logFile)\r\n logFile.writelines(\"29 Calculate the end date field ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 30. Calculate Project Total Area\r\n logFile.writelines(\"30 Project total area calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.ProjectTotalArea()\r\n except:\r\n log_error(\"Error in 30 Project total area calculation: \", logFile)\r\n logFile.writelines(\"30 Project total area calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 31. Calculate the BCA_CORRECTED_BY\r\n logFile.writelines(\"31 Calculate the BCA_CORRECTED_BY starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rows_BCA_CB = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows_BCA_CB:\r\n if \"\\WSN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WSN\"\r\n elif \"\\WRN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WRN\"\r\n elif \"\\CWD\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"CWD\"\r\n rows_BCA_CB.updateRow(row)\r\n if row:\r\n del row\r\n if rows_BCA_CB:\r\n del rows_BCA_CB\r\n except:\r\n log_error(\"Error in 31 Calculate the BCA_CORRECTED_BY: \", logFile)\r\n\r\n # 32. 
Remove spaces in PROJ_REF_NO\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpaces = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.PROJ_REF_NO.strip()\r\n row.PROJ_REF_NO = lot_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 32 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 33. Process the Mukim Construct by Project\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.overwriteOutput = True\r\n try:\r\n MUKIM_CONSTRUCT_BYPROJ_IMPORT = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_IMPORT\"\r\n MUKIMCONBYPROJ_SORT = config.TempDataGDB + \"\\\\MUKIMCONBYPROJ_SORT\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS__2_ = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_IMPORT):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT)\r\n if arcpy.Exists(MUKIMCONBYPROJ_SORT):\r\n arcpy.Delete_management(MUKIMCONBYPROJ_SORT)\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_DISS):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n arcpy.MUKIMCONBYPROJ()\r\n # arcpy.MUKIMCONSTRUCTBYPROJProcess2()\r\n\r\n arcpy.Sort_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT, MUKIMCONBYPROJ_SORT, \"PROJ_END_DATE DESCENDING\",\r\n \"UR\")\r\n arcpy.Dissolve_management(MUKIMCONBYPROJ_SORT, MUKIM_CONSTRUCT_BYPROJ_DISS, \"PROJ_REF_NO\",\r\n \"LOT_KEY FIRST;PROJ_REF_NO FIRST;PROJ_TITLE FIRST;HOUSE_BLK_NO FIRST;ROAD_NAME FIRST;POSTAL_CODE FIRST;LEVEL_NO FIRST;UNIT_NO FIRST;BUILDING_NAME FIRST;PROJ_MUKIM_NOS FIRST;PROJ_LOT_NOS FIRST;PERMIT_WORK_TYPE FIRST;WORK_TYPE FIRST;OWNER_NAME FIRST;OWNER_FIRM_NAME FIRST;OWNER_ADDR FIRST;OWNER_TEL FIRST;OWNER_EMAIL FIRST;BUILDER_NAME FIRST;BUILDER_FIRM_NAME FIRST;BUILDER_ADDR FIRST;BUILDER_TEL FIRST;BUILDER_EMAIL FIRST;PE_NAME FIRST;PE_FIRM_NAME FIRST;PE_ADDR FIRST;PE_TEL FIRST;PE_EMAIL FIRST;ARCHITECT_NAME FIRST;ARCHITECT_FIRM_NAME FIRST;ARCHITECT_ADDR FIRST;ARCHITECT_TEL FIRST;ARCHITECT_EMAIL FIRST;PROJ_TOT_AREA FIRST;PROJ_PARENT_CWDCATCHMENT FIRST;PROJ_PARENT_WSNDEPOT FIRST;PROJ_PARENT_WRPCATCHMENT FIRST;BCA_CORRECTED_BY FIRST;PROJ_DURATION_MTHS FIRST;PROJ_COST FIRST\",\r\n \"MULTI_PART\", \"DISSOLVE_LINES\")\r\n arcpy.JoinField_management(MUKIM_CONSTRUCT_BYPROJ_DISS, \"FIRST_PROJ_REF_NO\", MUKIMCONBYPROJ_SORT,\r\n \"PROJ_REF_NO\", \"PROJ_APPROVAL_DATE;PROJ_END_DATE;PERMIT_DATE\")\r\n arcpy.CalculateField_management(MUKIM_CONSTRUCT_BYPROJ_DISS__2_, \"FIRST_PROJ_TOT_AREA\",\r\n \"[Shape_Area]/10000\", \"VB\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 33 Process the Mukim Construct by Project: \", logFile)\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"33 END process MUKIM CONSTRUCT\")\r\n\r\n # 34. 
Filter on-going projects\r\n\r\n logFile.writelines(\"34 Filter on-going projects starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # TempDataGDB = \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n rowsIn = arcpy.UpdateCursor(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n row = None\r\n for row in rowsIn:\r\n strdays = str(row.PROJ_END_DATE.date() - datetime.date.today())\r\n splitDays = strdays.split()\r\n if splitDays[0] == '0:00:00':\r\n result = \"On-going project (but will end today)\"\r\n else:\r\n if int(splitDays[0]) < 0:\r\n rowsIn.deleteRow(row)\r\n else:\r\n result = \"On-going project\"\r\n if rowsIn:\r\n del rowsIn\r\n if row:\r\n del row\r\n\r\n except:\r\n log_error(\"Error in 34 Filter on-going projects: \", logFile)\r\n logFile.writelines(\"34 Filter on-going projects ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 35. Append the new data to MUKIM_CONSTRUCT\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AppendNewData()\r\n except:\r\n log_error(\"Error in 35 Append the new data to MUKIM_CONSTRUCT: \", logFile)\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Clean the memory and the schema lock\r\n arcpy.RefreshCatalog(config.Notification)\r\n arcpy.Compact_management(config.TempDataGDB)\r\n gc.collect()\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"YES\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n arcpy.AddMessage(\"END BCA Processing\")\r\n arcpy.AddMessage(\"Passing file date to other functions: \" + repr(filedate))\r\n\r\n # Generate Report\r\n import ReportGeneration_Adhoc_WithProjects as gen_report\r\n gen_report.run(filedate)\r\n #\r\n # # Send email to departments\r\n # import EmailGenerationCompletion_adhoc as send_dept_notification\r\n # if \"CORRECTED\" in BCAreport.upper():\r\n # send_dept_notification.run(filedate, corrected=True)\r\n # else:\r\n # send_dept_notification.run(filedate)\r\n\r\n # Generate advisory letters\r\n import LetterGeneration as letter_gen\r\n letter_gen.run(filedate)\r\n #\r\n # # Send letters to project team\r\n # import EmailGeneration as send_advisory_email\r\n # send_advisory_email.run(filedate)\r\n\r\n\r\n # 36. 
Move the BCAReport in the backup folder\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n bk_file_path = os.path.join(config.BCAreportBackupFolder, input_file_name)\r\n\r\n # if the same file name exists in the backup folder, rename the new file with timestamp and move\r\n if os.path.exists(bk_file_path):\r\n\r\n new_filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\") + input_file_name\r\n new_filepath = os.path.join(config.BCAreportBackupFolder, new_filename)\r\n shutil.copy(BCAreport, new_filepath)\r\n os.remove(BCAreport)\r\n\r\n # if the filename does not exist in the backup folder, move the file to backup\r\n else:\r\n shutil.move(BCAreport, config.BCAreportBackupFolder)\r\n\r\n logFile.writelines(\"Moved the BCA report to the backup folder at \" + str(datetime.datetime.now()) + \"\\n\")\r\n logFile.close()", "def main():\n\n\t# =========== Skim file & output file ===========\n\tskimLoc = \"$MJDDATADIR/surfmjd/analysis/skim/DS1/20160621_265313037/*.root\"\n\t# skimLoc = \"/Users/wisecg/datasets/ds1/*.root\"\n\t# wsOut = \"./output/waveSkim-1550-1650.root\"\n\twsOut = \"./output/waveSkim-1500-2000-mH-2.root\"\n\n\t# =========== Skim file cuts ===========\n\tburstCut = \"!(time_s > 2192e3 && time_s < 2195e3) && !(time_s > 7370e3 && time_s < 7371e3) && !(time_s > 7840e3 && time_s < 7860e3) && !(time_s > 8384e3 && time_s < 8387e3) && !(time_s > 8984e3 && time_s < 8985e3) && !(time_s > 9002e3 && time_s < 9005e3) && run != 13075 && run != 13093 && run != 13116\"\n\n\t# low-energy noisy runs cut - need to research & refine\n\t# runCut = \"run!=13312 && run!=13121 && run!=13004 && run!=12766 && run!=12735 && run!=12445 && run!=11175 && run!=12723 && run!=12746 && run!=12767 && run!=13071 && run!=13073 && run!=13074 && run!=13120 && run!=13205 && run!=13306 && run!=13307 && run!=9857 && run!=9862 && run!=9863\"\n\n\t# bigCut = \"channel%2==0 && mH==1 && (trapENFCal>1550 && trapENFCal<1650) && !wfDCBits && !muVeto && !isLNFill &&\" + burstCut\n\n\tbigCut = \"channel%2==0 && mH>1 && sumEH>1500 && !wfDCBits && isGood && \" + burstCut\n\n\t# =========== Ready? Go! 
===========\n\tskimmer(bigCut, skimLoc, wsOut)\n\t# skimChecker(wsOut)", "def run_script(input_dir, output_dir, output_file, bstp_num):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n print(\"\"\" Load results from step 1 & 2 \"\"\")\n start_0 = time.time()\n data_dim_file_name = output_dir + \"/temp/data_dim.mat\"\n mat = loadmat(data_dim_file_name)\n data_dim = mat['data_dim']\n data_dim = np.array([int(i) for i in data_dim[0, :]])\n n, l, m, p, g, g_num = data_dim\n y_design_file_name = output_dir + \"/temp/y_design.mat\"\n mat = loadmat(y_design_file_name)\n y_design = mat['y_design']\n resy_design_file_name = output_dir + \"/temp/resy_design.mat\"\n mat = loadmat(resy_design_file_name)\n resy_design = mat['resy_design']\n efit_eta_file_name = output_dir + \"/temp/efit_eta.mat\"\n mat = loadmat(efit_eta_file_name)\n efit_eta = mat['efit_eta']\n esig_eta_file_name = output_dir + \"/temp/esig_eta.mat\"\n mat = loadmat(esig_eta_file_name)\n esig_eta = mat['esig_eta']\n hat_mat_file_name = output_dir + \"/temp/hat_mat.mat\"\n mat = loadmat(hat_mat_file_name)\n hat_mat = mat['hat_mat']\n snp_file_name = output_dir + \"/temp/snp.mat\"\n mat = loadmat(snp_file_name)\n snp = mat['snp']\n # read the image size\n img_size_file_name = input_dir + \"img_size.txt\"\n img_size = np.loadtxt(img_size_file_name)\n img_size = np.array([int(i) for i in img_size])\n # read the image index of non-background region\n img_idx_file_name = input_dir + \"img_idx.txt\"\n img_idx = np.loadtxt(img_idx_file_name)\n img_idx = np.array([int(i) for i in img_idx])\n end_0 = time.time()\n print(\"Elapsed time in Step 3 is \", end_0 - start_0)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n print(\"\"\" Step 3. Significant locus-voxel and locus-subregion detection \"\"\")\n start_3 = time.time()\n alpha = 1e-5\n c_alpha = -10**alpha\n bstp_num = int(bstp_num)\n max_stat_bstp, max_area_bstp = wild_bstp(snp, y_design, resy_design, efit_eta, esig_eta, hat_mat,\n img_size, img_idx, c_alpha, g_num, bstp_num)\n print(max_stat_bstp)\n print(max_area_bstp)\n bstp_out = np.hstack((max_stat_bstp, max_area_bstp))\n bstp_out_file_name = output_dir + output_file\n np.savetxt(bstp_out_file_name, bstp_out)\n end_3 = time.time()\n print(\"Elapsed time in Step 3 is \", end_3 - start_3)", "def main():\n run_test_suite('../models/iEK1008.json') # runs test suite with iEK1008.json\n\n # rewrites iEK1008.json to iMtb_H37Rv.json so original model is not overwritten\n model_iek = cobra.io.load_json_model('../models/iEK1008.json')\n cobra.io.save_json_model(model_iek, '../models/iMtb_H37Rv.json')\n model = cobra.io.load_json_model('../models/iMtb_H37Rv.json')\n\n # removes 10 imbalanced reactions from iEK1008; all 10 reactions are added back with balanced formulas during update\n rxns_to_bal = [rxn.id for rxn in model.reactions if len(rxn.check_mass_balance()) > 0\n if 'EX_' not in rxn.id and 'DM_' not in rxn.id and 'BIOMASS' not in rxn.id]\n\n for rxn_to_bal in rxns_to_bal:\n model.reactions.get_by_id(rxn_to_bal).remove_from_model()\n cobra.io.save_json_model(model, '../models/iMtb_H37Rv.json')\n\n run_test_suite('../models/iMtb_H37Rv.json', update='imbalanced_reactions_removed')\n\n # creates COBRApy Metabolite objects for new metabolites\n df_new_mets = pd.read_excel('../data/iEK1008_updates.xlsx', sheet_name='metabolites_added', usecols='A:C')\n\n new_mets = {}\n for index, row in df_new_mets.iterrows():\n new_met_id = str(row['Metabolite_ID'])\n new_met_name = row['Metabolite_Name']\n new_met_formula = 
row['Metabolite_Formula']\n if new_met_id.endswith('c'):\n new_met_comp = 'c'\n elif new_met_id.endswith('e'):\n new_met_comp = 'e'\n else:\n print('Metabolite compartment could not be determined. Please check metabolite id.')\n new_met_comp = ''\n new_met = cobra.Metabolite(new_met_id, name=new_met_name, formula=new_met_formula, compartment=new_met_comp)\n new_mets[new_met_id] = new_met\n\n df_new_rxns = pd.read_excel('../data/iEK1008_updates.xlsx', sheet_name='reactions_added', usecols='A:G')\n\n with alive_bar(len(df_new_rxns), bar='blocks', spinner='notes_scrolling') as bar:\n for index, row in df_new_rxns.iterrows():\n new_rxn_mets = {}\n new_rxn_form = row['Reaction_Formula']\n if ' --> ' in new_rxn_form:\n new_rxn_form = new_rxn_form.split(' --> ')\n elif ' <=> ' in new_rxn_form:\n new_rxn_form = new_rxn_form.split(' <=> ')\n else:\n print('Unexpected symbol in ' + row['Reaction_Formula'])\n\n subs = new_rxn_form[0].split(' + ')\n for sub in subs:\n if '.0' in sub:\n sub_coeff = -1 * float(sub.split(' ')[0])\n sub_id = sub.split(' ')[-1]\n try:\n new_rxn_sub = new_mets[sub_id]\n except KeyError: # metabolite is not new, i.e. already in iEK1008\n new_rxn_sub = model.metabolites.get_by_id(sub_id)\n else:\n sub_coeff = -1.0\n try:\n new_rxn_sub = new_mets[sub]\n except KeyError:\n new_rxn_sub = model.metabolites.get_by_id(sub)\n new_rxn_mets[new_rxn_sub] = sub_coeff\n\n pros = new_rxn_form[1].split(' + ')\n for pro in pros:\n if '.0' in pro:\n pro_coeff = float(pro.split(' ')[0])\n pro_id = pro.split(' ')[-1]\n try:\n new_rxn_pro = new_mets[pro_id]\n except KeyError:\n new_rxn_pro = model.metabolites.get_by_id(pro_id)\n else:\n pro_coeff = 1.0\n try:\n new_rxn_pro = new_mets[pro]\n except KeyError:\n new_rxn_pro = model.metabolites.get_by_id(pro)\n new_rxn_mets[new_rxn_pro] = pro_coeff\n\n # creates new reactions with new COBRApy Reaction and Metabolite objects\n create_reaction(model, row['Reaction_ID'], row['Reaction_Name'], row['Subsystem'], new_rxn_mets,\n float(row['Lower_Bound']), float(row['Upper_Bound']), row['Gene_Reaction_Rule'])\n\n cobra.io.save_json_model(model, '../models/iMtb_H37Rv.json')\n\n run_test_suite('../models/iMtb_H37Rv.json', update=row['Reaction_ID'])\n\n bar()\n\n return", "def consolidate(max_rounds, int_fwm,master_index, index, filename = 'data_large'):\n\n\n layer_0 = '0/0'\n filepath = 'output{}/output{}/data/'.format(master_index, index)\n file_read = filepath + filename\n file_save = filepath + filename+'_conc'\n \n # Input data, small, no need to cons\n D = read_variables(file_read, '0/0')\n save_variables(file_save, 'input', **D)\n\n if max_rounds ==0:\n max_rounds +=1\n U_cons = np.zeros([4,max_rounds, 7*int_fwm.nt], dtype = np.complex128)\n # Reading of all the oscillating spectra and sending them to a 3D array\n unfortmated_string = '{}/{}/U'\n with h5py.File(file_read+'.hdf5', 'r') as f:\n for pop in range(1,5):\n for r in range(max_rounds):\n U_cons[pop - 1,r,:] = f.get(unfortmated_string.format(pop,r)).value\n save_variables(file_save, 'results', U = U_cons) \n os.system('mv '+file_save+'.hdf5 '+file_read+'.hdf5')\n return None", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if 
kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]" ]
[ "0.6137783", "0.610961", "0.6097006", "0.6071778", "0.59867096", "0.5944537", "0.59281546", "0.5890876", "0.5888353", "0.58699083", "0.5863766", "0.5852129", "0.58403766", "0.58233374", "0.58191043", "0.5813233", "0.5811937", "0.5777856", "0.5767777", "0.5765127", "0.57320464", "0.5724286", "0.572312", "0.5699994", "0.56938756", "0.56784886", "0.56725454", "0.5670405", "0.566777", "0.5659553" ]
0.6892021
0
returns if postcode like
def is_postal_code(elem): return 'post' in elem.attrib['k']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postcode(self):\n return self._postcode", "def postcode(self):\n return self._postcode", "def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response", "def detect_postcode_type(postcode):\n postcode_pattern = r'^[A-Z]{1,2}[0-9]{1}[0-9A-Z]{0,1}[\\s]*[0-9][A-Z]{2}$'\n district_pattern = r'^[A-Z]{1,2}[0-9]{1}[0-9A-Z]{0,1}$'\n area_pattern = r'^[A-Z]{1,2}$'\n\n postcode = clean_postcode(postcode)\n\n # Convert x to a pandas series\n postcode = pd.Series(np.atleast_1d(postcode))\n\n postcode_type = np.where(\n postcode.str.match(postcode_pattern), 'postcode',\n np.where(\n postcode.str.match(district_pattern), 'district',\n np.where(\n postcode.str.match(area_pattern), 'area', 'none'\n )\n )\n )\n\n return postcode_type", "def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)", "def normalise_postcode(postcode):\n\n postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n postcode = postcode[:-3] + \" \" + postcode[-3:]\n if POSTCODE_RE.match(postcode):\n return postcode\n return None", "def verify_postcode_api(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n request_path = requests.get(self.path + self.postcodes, verify=False)\n response_code = str(request_path)\n\n if response_code == '<Response [200]>':\n verification_status = 'Verified'\n elif response_code == '<Response [404]>':\n verification_status = 'Invalid Postcode'\n elif response_code == '<Response [400]':\n verification_status = 'No Postcode Submitted'\n elif response_code == '<Response [500]':\n verification_status = 'Server error'\n else:\n verification_status = 'Invalid Postcode'\n return verification_status", "def test_can_lookup_postcode(self):\n postcode_to_lookup = \"SW1A 1AA\"\n os_places_key = self.app.config.get(\"OS_PLACES_API_KEY\")\n addresses = AddressLookup(key=os_places_key).by_postcode(postcode_to_lookup)\n self.assertGreater(len(addresses), 0)\n result_postcode = addresses[0].get(\"DPA\", {}).get(\"POSTCODE\")\n self.assertEqual(result_postcode, postcode_to_lookup)", "def update_postcode(postcode, invalid = True):\r\n m = postcode_format_re.search(postcode)\r\n if m:\r\n invalid = False\r\n postcode= postcode[:5]\r\n return (invalid, postcode)", "def get_info_on_postalcode(_, postalcode):\n fourpp = int(postalcode[0:4])\n chars = postalcode[4:6]\n streets = get_streets(fourpp, chars)\n if streets:\n street = streets[0]\n town = street.postcode.city.get_official_name()\n address = street.street\n data = {'found': True, 'address': address, 'town': town}\n else:\n data = {'found': False}\n j = json.dumps(data)\n return HttpResponse(j, content_type='application/json')", "def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 
0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p", "def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"postal_codes\")", "def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"postal_codes\")", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def test_getLocationFromPostcode1(self):\n \n pstPrc=PostcodeProcessor()\n coords=pstPrc.getLocationFromPostcode(self.postcode1)\n self.assertEqual(coords.latitude,self.longLat1.latitude)\n self.assertEqual(coords.longitude,self.longLat1.longitude)", "def test_parse_post_code_field(self):\n fields = {'Post code': {'offset': 171,\n 'length': 4}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Post code': '2048'}\n msg = 'Post code field parse incorrect'\n self.assertEqual(received, expected, msg)", "def parse_postalUS(self):\n \n index = self.index\n \n # US Postal Code\n if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():\n return None, 0\n postal = self.words[index]['word']\n \n if index + 1 < self.length:\n if self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():\n postal += '-' + self.words[index]['word']\n return postal, 3\n else:\n return postal, 1\n \n return postal, 1", "def postal_code(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"postal_code\")", "def pintest(self, barcode, pin):\n u = self.dump(barcode)\n if 'ERRNUM' in u:\n return False\n return len(barcode) == 14 or pin == barcode[0] * 4", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = 
good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def postal_code(self, instance):\r\n return instance.user.profile.postal_code", "def country(alpha_2_code: str) -> None:", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def postal(self):\n if self.index >= self.length:\n return False \n \n if self._sta and \"CA-\" in self._sta:\n self._pst, n = self.parse_postalCA()\n else:\n self._pst, n = self.parse_postalUS()\n \n if self._pst is not None:\n self.idx_pst = self.index\n self.index += n\n if self._debug: print(\"PST\", self._pst, self.idx_pst)\n return True\n return False", "def _select_market_code(code):\n code = str(code)\n if code[0] in ['5', '6', '9'] or code[:3] in [\"009\", \"126\", \"110\", \"201\", \"202\", \"203\", \"204\"]:\n return 1\n return 0", "def geocode_one(self, postcode: str, address: Optional[str] = None) -> pd.Series:\n if postcode is None and address is None:\n raise utils.GenericException(\"You must pass either postcode or address, or both.\")\n if self.gmaps_key is None:\n self.gmaps_key = self._load_key()\n if self.gmaps_key is not None:\n self.gmaps_client = googlemaps.Client(key=self.gmaps_key)\n if self.cache is None:\n self._load_cache()\n sep = \", \" if address and postcode else \"\"\n postcode = postcode if postcode is not None else \"\"\n address = address if address is not None else \"\"\n search_term = f\"{address}{sep}{postcode}\"\n if search_term in self.cache:\n logging.debug(\"Loading GMaps Geocoder API result from cache: '%s'\", search_term)\n geocode_result = self.cache[search_term]\n else:\n logging.debug(\"Querying Google Maps Geocoder API for '%s'\", search_term)\n if self.gmaps_key is None:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geocode_result = self.gmaps_client.geocode(search_term, region=\"uk\")\n self.cache[search_term] = geocode_result\n self.cache_modified = True\n if not geocode_result or len(geocode_result) > 1:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 
0})\n geometry = geocode_result[0][\"geometry\"]\n ok_loc_types = [\"ROOFTOP\", \"GEOMETRIC_CENTER\"]\n if geometry[\"location_type\"] in ok_loc_types or \\\n geocode_result[0][\"types\"] == [\"postal_code\"]:\n return pd.Series({\"latitude\": geometry[\"location\"][\"lat\"],\n \"longitude\": geometry[\"location\"][\"lng\"],\n \"match_status\": 3})\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")" ]
[ "0.65805596", "0.65805596", "0.6374579", "0.63254106", "0.6238786", "0.6128216", "0.6051144", "0.60343444", "0.586208", "0.58339965", "0.5811099", "0.57576764", "0.57576764", "0.5754006", "0.5704175", "0.56964314", "0.56823075", "0.56657773", "0.5656511", "0.5651277", "0.56479186", "0.56326675", "0.5614085", "0.56061846", "0.56052697", "0.5603055", "0.5600199", "0.5586636", "0.5574197", "0.5574197" ]
0.7000855
0
Remove journal entry at position `pos`.
def remove_entry(self, pos: int) -> None: del self.entries[pos]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_pos(self, pos):\n yield from self.command('delete {}'.format(pos))\n return True", "def delete(self, pos):\n if self.is_playing() and self.current_position() == pos:\n self.x.playback_stop().wait()\n self.x.playlist_remove_entry(pos).wait()", "def delete_row(self, pos):\n del self._grid[pos]", "def pyreadline_remove_history_item(pos: int) -> None:\n # Save of the current location of the history cursor\n saved_cursor = readline.rl.mode._history.history_cursor\n\n # Delete the history item\n del readline.rl.mode._history.history[pos]\n\n # Update the cursor if needed\n if saved_cursor > pos:\n readline.rl.mode._history.history_cursor -= 1", "def delete(self,pos):\n pos.next = pos.next.next", "def delete_data(self, *pos):\n r, c = pos\n self._grid[r][c] = None", "def remove(self, pos, length):\n if pos in self.removals:\n self.removals[pos] += length\n else:\n self.removals[pos] = length", "def remove_from_level(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n level = level[:index] + AIR + level[1 + index:]\n return level", "def delete(self, del_pos=None):\n if del_pos is None:\n del_pos = self.__length\n if self.__list is None:\n print \"Nothing to remove.\"\n else:\n if del_pos == 0:\n self.__list = self.__list.get_next()\n else:\n prior = self.__list\n current = self.__list.get_next()\n current_pos = 1\n while current.get_next() is not None and current_pos < del_pos:\n prior = current\n current = current.get_next()\n current_pos += 1\n prior.set_next(current.get_next())\n self.__length -= 1", "def delete_node_at_pos(self, pos):\n if self.head:\n cur_node = self.head\n if pos == 0:\n self.head = cur_node.next\n cur_node = None\n return \n\n prev = None\n count = 0 \n while cur_node and count != pos:\n prev = cur_node\n cur_node = cur_node.next\n count += 1\n\n if cur_node is None:\n return \n\n prev.next = cur_node.next\n cur_node = None", "def delete(self, node):\n\n # logger_cagada.debug(\"norrando nodo %s\" % (type(node)))\n entry = self.entry_finder.pop(node)\n # logger_cagada.debug(\"la entry q c borra %s\" % entry)\n entry[-1] = self.REMOVED\n # logger_cagada.debug(\"el heap es %s\" % self.heap)\n return entry[0]", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def DeleteToolByPos(self, pos):\r\n \r\n if pos >= 0 and pos < len(self._items):\r\n \r\n self._items.pop(pos)\r\n self.Realize()\r\n return True\r\n\r\n return False", "def delete_column(self, pos):\n for i in range(len(self._grid)):\n del self._grid[i][pos]", "def troop_remove(self, pos):\n x, y = pos\n # tile_id = AIV_SIZE * y + x\n \n troop = self.tmap[y, x]\n if (troop == 0):\n return\n \n # update tmap\n self.tmap[y, x] = 0\n\n # first remove thing from tarr, then find something new in tmap\n\n\n # for slot in range(0, len(self.tarr)):\n # if (self.tarr[slot] == tile_id):\n # self.tmap[y, x] = slot//10\n \n # # update tarr\n # for slot in range(10*troop, 11*troop):\n # if (self.tarr[slot] == tile_id):\n # for slot_slot in range(slot, 11*troop-1):\n # self.tarr[slot_slot] = self.tarr[slot_slot+1]", "def deleteEntry (self,event=None):\n \n c = self.c ; box = self.box\n \n # Work around an old Python bug. 
Convert strings to ints.\n items = box.curselection()\n try:\n items = map(int, items)\n except ValueError: pass\n \n if items:\n n = items[0]\n p = self.position[n]\n del self.positionList[n]\n if p in c.visitedList:\n c.visitedList.remove(p)\n self.fillbox()", "def cleanTileAtPosition(self, pos):\n self.tiles[pos] = 'clean'", "def remove(self, e):\n p = self._find_position(e) # try to locate existing element\n if p is not None:\n self._data.delete(p) # delete if found", "def remove_column(self, pos, labels=\"REMOVE\"):\n MutableAlignment.remove_column(self, pos)\n if labels == \"RESET\":\n self._reset_col_names()\n elif labels == \"REMOVE\":\n self._col_labels = self._col_labels[:pos] + \\\n self._col_labels[pos + 1:]", "def remove(self, item) -> None:\n entry = self.entry_finder.pop(item)\n entry[-1][0] = None", "def remove(self, val):\n if val in self.posFind and self.posFind[val] != -1:\n delPos = self.posFind[val]\n self.nums[delPos] = self.nums[-1]\n self.posFind[self.nums[-1]] = delPos\n self.posFind[val] = -1\n self.nums.pop()\n return True\n return False", "def delete_entry(title):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)", "def fs_remove_entry(self, path):\n\t\treturn Job(SDK.PrlSrv_FsRemoveEntry(self.handle, path)[0])", "def remove(self, uid: int) -> int:\n\n bookmark = self.find_id(uid)\n\n deletions = 0\n\n if bookmark:\n deletions = self._delete(\n \"UPDATE bookmarks SET deleted=CURRENT_TIMESTAMP WHERE rowid=?\",\n (bookmark[\"rowid\"],)\n )\n\n if bookmark[\"tags\"]:\n cherrypy.engine.publish(\"cache:clear\", \"bookmarks:all_tags\")\n\n return deletions", "def delete_after_position(self, position: int) -> None:\n if self.get_length() < position:\n return\n elif self.get_length() == 0:\n return self.delete_at_beginning()\n elif self.get_length() == position:\n return self.delete_at_end()\n else:\n previous = self.head\n current = self.head\n for _ in range(position):\n previous = current\n current = current.get_next_node()\n previous.set_next_node(current.get_next_node())\n current.get_next_node().set_previous_node(previous)\n current.set_next_node(None)\n current.set_previous_node(None)\n temp = current.get_data()\n del current\n self._decrease_length()\n return temp", "def _delChar(self, pos):\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n self.colors.pop(pos)\n self._string_metric = self._string_metric[:pos]\n self.text = self.text[:pos] + self.text[pos + 1:]\n self._updateGlyphs(pos)", "def DeleteElement(self, position):\n self.__context.builder.DocumentElementDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n position)", "def delete_entry_from_db(entry):\n db.session.delete(entry)\n db.session.commit()", "def cleanTileAtPosition(self, pos):\n #Return the floor of x as a float, the largest integer value less than\n #or equal to x\n posx = pos.getX()\n posy = pos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n self.tiles[(posx, posy)] = 1 # using 0 as dirty value, 1 as clean value, of key tuple pos(x,y)\n #self.printTiles()\n #raise NotImplementedError", "def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]" ]
[ "0.6454446", "0.6282452", "0.6261571", "0.61265475", "0.6009769", "0.59815764", "0.58335143", "0.57944274", "0.5689213", "0.5677288", "0.56020075", "0.55991143", "0.55826235", "0.55457", "0.55305755", "0.55185974", "0.5401358", "0.5388663", "0.5358339", "0.5331406", "0.530921", "0.5306277", "0.5300817", "0.5296484", "0.52889776", "0.5283555", "0.5265191", "0.5234831", "0.52252215", "0.5222931" ]
0.81650764
0
Save journal entries into a file.
def save(journal: Journal, file: Path) -> None: with open(file, "w") as output: output.writelines(f"{entry}\n" for entry in journal.get_entries())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_txt():\n # open file and append, if it doesn't exist then create it.\n with open('journal_entries.txt', 'a+') as f:\n # .get the input in text widget at the first line, '0th' character, then read until the end\n f.write(\"\\n\" + get_date_time())\n for i in range(len(entries)):\n string = entries[i].get('1.0', 'end-1c')\n if string:\n f.write(\"\\n\" + string)", "def save_exit(name, data):\n jrn_path = build_path(name)\n print(f'... saving new journal entries to {jrn_path} ...')\n with open(jrn_path, 'w') as file:\n for line in data:\n file.write(line + '\\n')\n print('... save complete ...')", "def _SaveEntries(self, entries):\n text = \"entries = \\\\\\n\" + pprint.pformat(entries, 2) + '\\n'\n file_path = os.path.join(self._root_dir, self._options.entries_filename)\n gclient_utils.FileWrite(file_path, text)", "def exportJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n fileName = self.journals[x]['name']\n fileName += \".txt\"\n options = QtGui.QFileDialog.DontResolveSymlinks | QtGui.QFileDialog.ShowDirsOnly\n directory = QtGui.QFileDialog.getExistingDirectory(None, \"Select directory to save file\", os.getenv('HOME'), options)\n if directory:\n fileName = directory + \"/\" + fileName\n print (\"Exporting: to \" + fileName)\n data = self.journals[x]['journal']\n f = open(fileName, 'w')\n f.write(data)\n f.close()\n self.log += \"Journal \" + fileName + \" exported\"\n QtGui.QMessageBox.information(None, \"Journal Export\", str(fileName)+\" exported\\n\")", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def __save_article_to_file(self, content):\n with open(\"article.txt\", 'w') as out:\n out.write(content)", "def write_entries(self, entries):\n for entry in entries:\n self.write(entry)", "def save(*messages):\n data = Parser.parse_texts(*messages[1:])\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n title = Parser.parse_text(messages[0])\n\n file = open(\"./logs/\"+threading.currentThread().getName()+today+\".log\",'a+')\n file.write(\"\\n==\"+title+hour+\"==\\n\")\n if type(data) is dict: #Dictionary with each value being a triplet. 
From get_all_items\n for key in data.keys():\n file.write(Parser.parse_text(key) + \" -> \"+ Parser.parse_text(str(data[key].x)) +\"\\n\")\n elif type(data) is list: #From get_item, market item, attribute listings\n for listing in data:\n file.write(str(listing.id)+\" - \"+str(listing.price/100)+\" euros\\n\")\n else: #plain text\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def save_file(self):\n f = open(self._file_name, \"w\")\n try:\n for rental in self.list:\n rental_str = self.obj_to_string(rental)\n f.write(rental_str)\n f.close()\n except Exception as e:\n raise e", "def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")", "def write_to_file(self, content):\n try:\n with open(self.full_path_to_file, \"wb\") as fp:\n fp.write(content)\n except PermissionError:\n logging.error(\n \"Conversion cannot be performed. Permission denied for this directory\"\n )\n sys.exit()\n self.logger.info(\"News has been successfully converted\")", "def save_to_file():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n new_file = [] # save edited information to new_file list for writing to csv file\n for x in range(len(entries)):\n new_row = []\n for y in range(len(entries[x])):\n new_row.append(entries[x][y].get())\n new_file.append(new_row)\n\n file_to_save = value.get() # get name of file to write edited content to\n file_path = lrs_path + file_to_save\n with open(file_path, \"w\") as the_file:\n writer = csv.writer(the_file, lineterminator=\"\\n\")\n for line in new_file:\n writer.writerow(line)\n\n messagebox.showinfo(\"Message\", \"File has been saved.\", parent=app_frame)", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write_to_file(self, papers, filename):\n\t\tpass", "def save_logs(self, mode):\n\t\tif mode == \"c\":\n\t\t\twith open(LOGS_FULL_PATH, 'wb') as f:\n\t\t\t\tpickle.dump(self.LOGS, f, pickle.HIGHEST_PROTOCOL)\n\t\telif mode == \"a\":\n\t\t\twith open(ARCHIVES_FULL_PATH, 'wb') as f:\n\t\t\t\tpickle.dump(self.ARCHIVES, f, pickle.HIGHEST_PROTOCOL)", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def save(self):\n\n print('Bye!')\n try:\n with open('records.txt', 'w') as fh:\n 
fh.write(str(self._initial_money))\n for n in self._records:\n fh.write('\\n'+n)\n except OSError:\n sys.stderr.write('Cannot open file.\\n')", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def save_file(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'wb') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))\n return True", "def save_file_(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'w') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save(self, path):\n f = open(path, 'w')\n f.write(self.content().encode('utf-8'))\n f.close()", "def journal_write(session, k, v):\n entry = models.VppEtcdJournal(k=k, v=v)\n session.add(entry)\n session.flush()", "def save(self, content, address, file):\n full_address = \"/home/red/WAREHOUSE\" + address\n file_path = full_address + \"/\" + file\n\n try:\n os.makedirs(full_address, 0o777, True)\n except OSError:\n pass\n\n write_binary_file(content, file_path)", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)" ]
[ "0.71133924", "0.70640576", "0.68299794", "0.68087065", "0.6638848", "0.6473705", "0.64459836", "0.61746615", "0.61529505", "0.61082065", "0.6088982", "0.60406435", "0.60406435", "0.5983607", "0.5972086", "0.59578437", "0.5955684", "0.59284395", "0.58410317", "0.58036464", "0.57855684", "0.57762414", "0.57509446", "0.57506305", "0.574192", "0.57091504", "0.5706521", "0.5703111", "0.5701415", "0.56929386" ]
0.8384327
0
Load journal entries from a file.
def load(journal: Journal, file: Path) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print(f'... loading journal entries from {jrn_path} ...')\n journal = []\n with open(jrn_path, 'r') as file:\n for line in file:\n journal.append(line.rstrip())\n print(f'... loaded {len(journal)} items')\n return journal", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def load_file(self):\n try:\n f = open(self._file_name, \"r\")\n line = f.readline()\n while len(line) > 0:\n super(RentalHistoryText, self).add_rental(self.string_to_obj(line))\n line = f.readline()\n f.close()\n except IOError as e:\n raise e", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n sub = self.__createSubjectFromLine(line)\n # invoke the store method from the base class\n SubjectsRepo.store_subject(self, sub)\n fh.close()", "def load_from_file(self, file_path):\n for line in open(file_path, 'r'):\n term = line.rstrip('\\n')\n self.add(term)", "def load_file(self, file_path):\n with open(file_path, \"r\") as mappings_file:\n for raw_line in mappings_file:\n line = raw_line.split()\n # Add new record to the records dictionary.\n new_record = Record(line[0], line[1], line[2], line[3])\n self.add_record(new_record)", "def load_journal_json(self, absolute_path):\n with open(absolute_path) as json_file:\n data = json.load(json_file)\n\n return data", "def from_file(self, filename=None):\n if not self.name:\n #we don't have a file associated with the EntryList:\n if not filename:\n print \"UNKNOWN FILE!\"\n exit\n else:\n self.name = filename\n \n elif filename and filename != self.name:\n #ambiguous which file to use\n print \"different file than what log was initialized with\"\n exit\n \n else:\n #we have an original filename and none passed in\n #or the original filename equals the one passed in\n #should be good to go\n pass\n\n if os.path.exists(self.name):\n\n #f = open(self.name, \"U\")\n #2009.04.02 20:44:31 \n #very strange behavior when opening up utf-8 files\n #characters get reincoded\n #this is especially prominent when using check_feed.py\n #was using latin_1... 
going back to utf-8\n #f = codecs.open(self.name, encoding='latin_1')\n #codecs.ignore_errors(UnicodeDecodeError) \n f = codecs.open(self.name, encoding='utf-8', errors='ignore')\n\n self.write(f.read())\n f.close\n\n self.seek(0)\n\n else:\n print \"NO FILE ASSOCIATED WITH LOG: %s\" % self.name", "def read (self, path):\n\n\t\tself.data = []\n\t\t# print \"*** path: %s***\" % path\n\t\tdir, filename = os.path.split (path)\n\t\troot, ext = os.path.splitext (filename)\n\t\t# encoding = 'ISO-8859-1' # utf-8\n\t\ts = codecs.open(path,'r', self.encoding).read()\n\t\t## s = unicode(f.read(),'utf-8')\n\t\ts = self.preprocess (s)\n\t\tlines = split (s, self.linesep)\n\t\tschema = self.splitline(lines[0])\n\n\t\t## print \"** %s **\" % os.path.splitext(filename)[0]\n\t\tif self.verbose:\n\t\t\tprint \"read %d lines from %s\" % (len(lines), path)\n\n\t\tfor i in range(1,len(lines)):\n\t\t\tif not lines[i].strip(): \n\t\t\t\t# print 'skipping line (%d)' % i\n\t\t\t\tcontinue\n\t\t\tfields = self.splitline(lines[i])\n\t\t\titem = self.entry_class (fields, schema)\n\t\t\tif self.accept (item):\n\t\t\t\tself.add (item)\n\n\t\tself.schema = schema\n\t\t# self.data.sort (lastNameCmp)", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")", "def save(journal: Journal, file: Path) -> None:\n with open(file, \"w\") as output:\n output.writelines(f\"{entry}\\n\" for entry in journal.get_entries())", "def load_file(filename):\n with open(path.join(PATH_ROOT, filename), \"r\") as in_file:\n return in_file.readlines()", "def loadFile(self, path):\n books_added = 0\n records_added = 0\n books_to_clean = set()\n\n PDEBUG('Loading from file: %s', path)\n\n with open(path) as fd:\n while True:\n content = fd.read(PAGE_SIZE)\n if content is None:\n break\n if len(content) == 0:\n break\n pos = 0\n while True:\n m = R_MATCH_ENTRY.search(content, pos)\n if m is None:\n new_content = fd.read(PAGE_SIZE)\n if len(new_content) == 0:\n print('New books: %d, new records: %d' %\n (books_added, records_added))\n print('EOF reached...')\n return (books_added, records_added)\n else:\n content = content[pos:] + new_content\n pos = 0\n else:\n (book, author) = process_book_name(m.group(1))\n book = handleStr(book)\n author = handleStr(author)\n page = handleStr(m.group(2).strip())\n time = handleStr(m.group(3).strip())\n mark = handleStr(m.group(4).strip())\n pos = m.end(0)\n\n bts = book.encode()\n if bts[0:3] == codecs.BOM_UTF8:\n PDEBUG('oops: ')\n PDEBUG('%X-%X-%X', bts[0], bts[1], bts[2])\n\n sys.exit()\n\n if len(mark) == 0:\n continue\n\n res = R_MATCH_POS.match(page)\n if res is None:\n res = R_MATCH_PAGE.match(page)\n if res is None:\n PDEBUG('oops: %s -- %s', book, page)\n sys.exit(1)\n\n pos_str = res.group(1)\n typ_str = res.group(2)\n\n (new_book, new_clip) = \\\n self.__addEntry__(\n book, author, pos_str, typ_str, time, mark)\n\n if new_book:\n books_added += 1\n\n if new_clip:\n books_to_clean.add(book)\n records_added += 1\n\n if books_to_clean:\n PDEBUG('Books to clean: %s', 
books_to_clean)\n\n for book in books_to_clean:\n self.cleanUpBook(book)\n\n print('Total books added: %d, clips added:%d' %\n (books_added, records_added))\n\n return (books_added, records_added)", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def _load_file(self, log_file, message_name_filter_list):\n if isinstance(log_file, str):\n self._file_handle = open(log_file, \"rb\") #pylint: disable=consider-using-with\n else:\n self._file_handle = log_file\n\n # parse the whole file\n self._read_file_header()\n self._last_timestamp = self._start_timestamp\n self._read_file_definitions()\n\n if self._debug:\n print(\"header end offset: {:}\".format(self._file_handle.tell()))\n\n if self.has_data_appended and len(self._appended_offsets) > 0:\n if self._debug:\n print('This file has data appended')\n for offset in self._appended_offsets:\n self._read_file_data(message_name_filter_list, read_until=offset)\n self._file_handle.seek(offset)\n\n # read the whole file, or the rest if data appended\n self._read_file_data(message_name_filter_list)\n\n self._file_handle.close()\n del self._file_handle", "def load_posts():\n \n with open(FILE_NAME, 'r') as f:\n return pickle.load(f)", "def load_text_file(file_path: str):\n with open(file_path) as f:\n content = f.readlines()\n return content", "def load_file(file_name):\n with open(file_name,\"r\") as f:\n return f.readlines()", "def load_file(filename):\n with open(filename, \"r\") as f:\n return f.readlines()", "def load_data(path):\n with open(path) as f:\n return f.readlines()", "def load_biblio(self, file_name, preload_ids=False, chunksize=1000):\n\n logger.info( \"Loading biblio data from [{}], with chunk size {}. Preload IDs? 
{}\".format(file_name, chunksize, preload_ids) )\n\n input_file = codecs.open(file_name, 'r', 'utf-8')\n biblio = json.load(input_file)\n\n sql_alc_conn = self.db.connect()\n db_api_conn = sql_alc_conn.connection\n\n if (\"cx_oracle\" in str(self.db.dialect)):\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (:1, :2, :3)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (:1, :2, :3)')\n else:\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (%s, %s, %s)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (%s, %s, %s)')\n\n\n ########################################################################\n # STEP 1: If overwriting, find extant docs and pre-populate doc ID map #\n ########################################################################\n\n extant_docs = set()\n\n if self.overwrite or preload_ids:\n\n for chunk in chunks(biblio, chunksize):\n\n # Loop over all biblio entries in this chunk\n doc_nums = set()\n for bib in chunk[1]:\n\n input_pubnum = self._extract_pubnumber(bib)\n\n # Early return: don't bother querying if we already have an ID\n if input_pubnum in self.doc_id_map:\n extant_docs.add( input_pubnum ) \n continue\n\n doc_nums.add(input_pubnum)\n\n if len(doc_nums) == 0:\n continue\n\n self._fill_doc_id_map(doc_nums, sql_alc_conn, extant_docs)\n\n logger.info( \"Discovered {} existing IDs for {} input documents\".format( len(extant_docs),len(biblio)) )\n\n\n ########################################################\n # STEP 2: Main biblio record processing loop (chunked) #\n ########################################################\n\n for chunk in chunks(biblio, chunksize):\n\n logger.debug( \"Processing {} biblio records, up to index {}\".format(len(chunk[1]), chunk[0]) )\n\n new_doc_mappings = dict() # Collection IDs for totally new document \n overwrite_docs = [] # Document records for overwriting\n duplicate_docs = set() # Set of duplicates to read IDs for\n known_count = 0 # Count of known documents\n\n new_titles = []\n new_classes = [] \n\n doc_insert_time = 0\n\n\n transaction = sql_alc_conn.begin()\n\n for bib in chunk[1]:\n\n ########################################\n # STEP 2.1 Extract core biblio records #\n ########################################\n\n family_id, pubdate, pubnumber, assign_applic = self._extract_core_biblio(bib)\n\n life_sci_relevant = self._extract_life_sci_relevance(bib)\n\n\n ####################################################\n # Step 2.2 Overwrite or Insert the document record #\n ####################################################\n\n if pubnumber in extant_docs:\n\n known_count += 1\n\n if self.overwrite:\n # Create an overwrite record\n doc_id = self.doc_id_map[pubnumber] \n overwrite_docs.append({\n 'extant_id' : doc_id,\n 'new_published' : pubdate,\n 'new_family_id' : family_id,\n 'new_life_sci_relevant' : life_sci_relevant,\n 'new_assign_applic' : assign_applic })\n else:\n # The document is known, and we're not overwriting: skip\n continue\n\n else:\n \n # Create a new record for the document\n record = {\n 'scpn' : pubnumber,\n 'published' : pubdate,\n 'family_id' : family_id,\n 'assign_applic' : assign_applic,\n 'life_sci_relevant' : int(life_sci_relevant) }\n \n try:\n\n start = time.time()\n result = sql_alc_conn.execute( self.docs.insert(), record )\n end = time.time()\n\n 
doc_insert_time += (end-start)\n\n except Exception, exc:\n\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n elif self.allow_document_dups:\n\n # It's an integrity error, and duplicates are allowed.\n known_count += 1\n duplicate_docs.add(pubnumber)\n\n # Reset transaction\n transaction.commit()\n transaction = sql_alc_conn.begin()\n continue \n\n else:\n\n raise RuntimeError(\n \"An Integrity error was detected when inserting document {}. This \"\\\n \"indicates insertion of an existing document, but duplicates have been disallowed\".format(pubnumber))\n\n\n doc_id = result.inserted_primary_key[0] # Single PK\n new_doc_mappings[pubnumber] = doc_id\n\n self._extract_detailed_biblio(bib, doc_id, new_classes, new_titles, pubnumber)\n\n # Commit the new document records, then update the in-memory mapping with the new IDs\n transaction.commit()\n self.doc_id_map.update(new_doc_mappings)\n\n logger.info(\"Processed {} document records: {} new, {} duplicates. DB insertion time = {:.3f}\".format( len(chunk[1]), len(new_doc_mappings), known_count, doc_insert_time))\n\n\n ########################################################\n # STEP 2.2: Deal with document overwrites / duplicates #\n ########################################################\n\n if len(overwrite_docs) > 0:\n\n transaction = sql_alc_conn.begin()\n\n # Update the master record for the document that's being overwritten\n stmt = self.docs.update().\\\n where(self.docs.c.id == bindparam('extant_id')).\\\n values(published=bindparam('new_published'), \n family_id=bindparam('new_family_id'), \n life_sci_relevant=bindparam('new_life_sci_relevant'),\n assign_applic=bindparam('new_assign_applic'))\n\n sql_alc_conn.execute(stmt, overwrite_docs)\n\n # Clean out ALL other references to the document, for re-insertion\n delete_ids = [record['extant_id'] for record in overwrite_docs]\n\n stmt = self.titles.delete().where( self.titles.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.classes.delete().where( self.classes.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.chem_mapping.delete().where( self.chem_mapping.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n transaction.commit()\n\n logger.info(\"Overwrote {} duplicate documents (master doc record updated, all other references deleted)\".format(len(overwrite_docs)))\n\n if len(duplicate_docs) > 0:\n self._fill_doc_id_map(duplicate_docs, sql_alc_conn)\n\n logger.info(\"Read {} IDs for duplicate documents\".format(len(duplicate_docs)))\n\n ########################################################\n # STEP 2.3: Bulk insertion of titles / classifications #\n ########################################################\n\n\n # Bulk insert titles and classification\n if self.load_titles:\n title_ins.execute(new_titles)\n logger.debug(\"Insertion of {} titles completed\".format(len(new_titles)) )\n\n if self.load_classifications:\n classes_ins.execute(new_classes)\n logger.debug(\"Insertion of {} classification completed\".format(len(new_classes)) )\n\n # END of main biblio processing loop\n\n # Clean up resources\n title_ins.close()\n classes_ins.close()\n sql_alc_conn.close()\n input_file.close()\n\n logger.info(\"Biblio import completed\" )", "def load(logFile):\n pass #TODO", "def load_list_of_entries(list_of_files):\n publication_entries = []\n entries = []\n titles = []\n\n for filename in list_of_files:\n entries_list = load_entries(filename)\n\n for e in entries_list:\n if 
e.main_publication:\n publication_entries.append(e)\n elif e.title not in titles:\n titles.append(e.title)\n entries.append(e)\n\n return publication_entries, entries", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def readFile(filename):\n\twith open(filename, 'rU') as csvIN:\n\t\tnext(csvIN)\n\t\toutCSV=(line for line in csv.reader(csvIN, dialect='excel'))\n\t\tfor row in outCSV:\n e = Entry(row)\n e.pass_import()", "def load_txt(filename, **kwargs):\n with sys_open(filename, 'r', **kwargs) as f:\n return f.readlines()", "def _load(self, filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n self.events = list(reader)", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n st = self.__createStudentFromLine(line)\n # invoke the store method from the base class\n StudentsRepo.store_student(self, st)\n fh.close()" ]
[ "0.7394419", "0.64924157", "0.62799805", "0.6213144", "0.6060179", "0.6032827", "0.5937667", "0.5873339", "0.5792897", "0.57746816", "0.57655644", "0.57224065", "0.56214803", "0.5567517", "0.5565053", "0.5565053", "0.5560204", "0.55173564", "0.54818314", "0.54786164", "0.54533386", "0.5433665", "0.54236835", "0.5408636", "0.5350907", "0.5341112", "0.53344345", "0.53302884", "0.53109455", "0.5304969" ]
0.81673855
0
Load journal entries from a URI.
def load_from_web(journal: Journal, uri: str) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print(f'... loading journal entries from {jrn_path} ...')\n journal = []\n with open(jrn_path, 'r') as file:\n for line in file:\n journal.append(line.rstrip())\n print(f'... loaded {len(journal)} items')\n return journal", "def load(journal: Journal, file: Path) -> None:", "def get_entries(uri):\n if not uri.endswith('/entries'):\n uri += '/entries'\n results = VGOCache(uri).results\n\n results = [ adjust_entry(x) for x in results ]\n return results", "def fetchJournalEntries(date):\n\t\n\tpattern = '%d/%m/%Y'\n\tdatetime_object = datetime.datetime.strptime(date, pattern)\n\t\n\t#Getting the feeds from respective feed functions\n\tslackFeed = getFromSlack(datetime_object)\n\twebServiceFeed = getFromWebService(datetime_object)\n\tgithubFeed = getFromGitService(datetime_object)\n\tdynamoFeed = getFromDynamo(datetime_object)\n\t\n\t#Combining feeds into a single output\n\tentireFeed = reconcileFeed(slackFeed, webServiceFeed, githubFeed, dynamoFeed)\n\t\n\treturn entireFeed", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def loadFromStream(self, stream, uri=None):\n self.loadFromDom(parseStream(stream))", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def load_journal_json(self, absolute_path):\n with open(absolute_path) as json_file:\n data = json.load(json_file)\n\n return data", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def fetch(feed):\n # Fetch the feed data.\n data = feedparser.parse(feed.ext_url)\n new_articles = []\n\n # If the `bozo` value is anything\n # but 0, there was an error parsing (or connecting) to the feed.\n if data.bozo:\n # Some errors are ok.\n if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n raise data.bozo_exception\n\n for entry in data.entries:\n\n # URL for this entry.\n url = entry['links'][0]['href']\n\n # Check for an existing Article.\n # If one exists, skip.\n if Article.objects(ext_url=url).first():\n continue\n\n data = extractor.extract(url, existing_data=entry)\n\n if data is None:\n continue\n\n # Secondary check for an existing Article,\n # by checking the title and source.\n existing = Article.objects(title=data['title']).first()\n if existing and existing.feed.source == feed.source:\n 
continue\n\n data['feed'] = feed\n\n article = Article(**data)\n article.save()\n new_articles.append(article)\n\n return new_articles", "def load_history_entries(self, *entries):\n # Simplified version:\n for entry in entries:\n try:\n self[entry.url.host] += [entry]\n except KeyError:\n self[entry.url.host] = [entry]\n \n \n temp_dict = {entry.url.host: [] for entry in entries} \n for entry in entries:\n temp_dict[entry.url.host] += [entry]\n\n # Update the dictionary\n # self.update(temp_dict) # Will override any lists with the same host name\n for host, entry in temp_dict.items():\n #try:\n self[host] += [entry]\n #except IndexError:\n #self[host] = [entry]", "def fetch_from_file(self, path):\n print(\"Fetching from %s ...\" % path)\n try:\n lines = open(path).readlines()\n except Exception, e:\n print(\"Failed to fetch from %s: %s\" % (path, e))\n return False\n\n self.domain_list = []\n for line in lines:\n domain_line = line.rstrip()\n if domain_line != '':\n self.domain_list.append(domain_line)\n\n print(\"Got %d domains\" % len(self.domain_list))\n return True", "def loadArtworks(catalog):\n artfile = cf.data_dir + 'Artworks-utf8-large.csv'\n input_file = csv.DictReader(open(artfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)", "def articles():\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM categories\n INNER JOIN entries ON\n entries.slug = categories.slug AND\n entries.published = categories.published\n WHERE categories.category='{category}'\n ORDER BY entries.published DESC\n \"\"\".format(category='article'))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def load(self, uri):\r\n self._encoder = load_model(uri+\"_lstm_encoder.hdf5\")\r\n self._autoencoder = load_model(uri+\"_lstm_autoencoder.hdf5\")\r\n\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)))\r\n dict_options = pf[os.path.basename(uri)+\"_options.json\"]\r\n\r\n self._latent_space = dict_options['latent_space']\r\n self._input_cells = dict_options['input_cells']", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def LoadArtIntoDB(store,art):\n if 'srcorgname' in art and art['srcorgname'] is not None:\n srcorg = Misc.GetOrgID( art[ 'srcorgname' ] )\n else:\n # no publication specified - look up using domain name\n o = urlparse.urlparse(art['permalink'])\n domain = o[1].lower()\n srcorg = Publication.find_or_create(domain)\n art['srcorg'] = srcorg\n\n\n # resolve bylined authors to journo ids\n expected_journo = None\n authors = Byline.CrackByline(art['byline'])\n attributed = []\n for author in authors:\n attributed.append(Journo.find_or_create(author, art, expected_journo))\n art['journos'] = attributed\n\n# if opts.test:\n# ukmedia.PrettyDump( art )\n\n article_id = store.upsert( art )\n\n return article_id", "def loadArtists(catalog):\n artistsfile = cf.data_dir + 'MoMA/Artists-utf8-10pct.csv'\n input_file = csv.DictReader(open(artistsfile, encoding='utf-8'))\n for artist in input_file:\n model.addArtist(catalog, artist)", "def _handle_import(contents, use_tags, owner):\n \n lines = contents.decode(\"utf-8\").split(\"\\n\")\n \n title = re.compile(r\"<a.*?>(.+?)</a>\", re.I)\n url = re.compile(r\"\"\"<a.*href=['\"](.+?)['\"]\"\"\", re.I)\n 
tags = re.compile(r\"\"\"<a.*?tags=[\"'](.+?)[\"']\"\"\", re.I)\n addTime = re.compile(r\"\"\"<a.*?add_date=[\"'](\\d+?)[\"']\"\"\", re.I)\n \n for l in lines:\n if \"<a\" in l.lower() and \"</a>\" in l.lower():\n bookmark = {}\n \n bookmark[\"title\"] = title.search(l)\n if not bookmark[\"title\"]:\n continue\n bookmark[\"title\"] = _unescape(bookmark[\"title\"].group(1))\n \n bookmark[\"url\"] = url.search(l)\n if not bookmark[\"url\"]:\n continue\n bookmark[\"url\"] = _unescape(bookmark[\"url\"].group(1))\n \n bookmark[\"tags\"] = [];\n if use_tags:\n result = tags.search(l)\n if result:\n bookmark[\"tags\"] = map(_unescape, result.group(1).split(\",\"))\n \n bookmark[\"added\"] = addTime.search(l)\n if bookmark[\"added\"]:\n bookmark[\"added\"] = bookmark[\"added\"].group(1)\n \n if not Bookmark.objects.filter(owner=owner, url=bookmark[\"url\"]).exists():\n bm = Bookmark(owner=owner, url=bookmark[\"url\"], title=bookmark[\"title\"])\n \n bm.save()\n if bookmark[\"added\"]:\n bm.added = datetime.datetime.fromtimestamp(int(bookmark[\"added\"]))\n \n for t in bookmark[\"tags\"]:\n bm.tag(t)\n \n bm.save()\n bm.autotag_rules()", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def get_entries_by_url(self, url, regex=False, flags=None,\n group=None, history=False, first=False): \n if self.database is None:\n raise DatabaseNotOpened('No KeePass Database Opened.')\n else:\n return self.database.find_entries_by_url(url, \n regex, \n flags, \n group, \n history, \n first)", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def getContents(self, itemId, itemURI, *args):\n if args:\n actionId = self._db.addAction(args[0]) \n else:\n actionId = -1\n\n print('\\t\\t[%s] %s\\t(%s)' % (itemId, itemURI, actionId))\n \n # dissect the file\n patURL = re.compile(r'URL=(?P<url>.*$)', re.IGNORECASE)\n patHttp = re.compile(r'(?P<url>http.*$)', re.IGNORECASE)\n patFtp = re.compile(r'(?P<url>ftp.*$)', re.IGNORECASE)\n\n f = open(itemURI,\"r\")\n url = ''\n idx = -1\n\n for line in f:\n idx += 1\n m = patURL.match(line)\n if not m:\n m = patHttp.match(line)\n\n if not m:\n m = patFtp.match(line)\n\n if m:\n url = m.group('url')\n itemIdRight = self._db.addItem(self._engine_id, url, datetime.datetime.now(), args)\n self._db.addItemLink(self._engine_id, itemId, itemIdRight, 'Contains')\n \n # we have a URI, down we wnat to action it, use the tail value to set the action:\n self._db.addItemEvent(self._engine_id, actionId, itemIdRight)\n\n self._db.addItemData(itemId, 'Contents', line, idx)", "def load_article(paper_id):\n s3 = boto3.client('s3')\n key = 'json/%s.json' % paper_id\n LOGGER.info('bucket=%s, key=%s', ARTICLE_BUCKET_NAME, key)\n obj = s3.get_object(\n Bucket=ARTICLE_BUCKET_NAME,\n Key=key)\n body = obj['Body']\n article = json.loads(body.read())\n body.close()\n return article", "def load(self, url):\n pass", "def load(self, url):\n pass", "def GetDocumentListEntry(self, uri):\n return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)" ]
[ "0.6321398", "0.6246365", "0.5560296", "0.5538123", "0.53121865", "0.5294257", "0.5108225", "0.51056397", "0.50977325", "0.50559926", "0.5041016", "0.49502626", "0.49426818", "0.49289203", "0.49164444", "0.48940632", "0.48866275", "0.48804903", "0.48804903", "0.48515296", "0.48508194", "0.4849793", "0.48485938", "0.48274857", "0.48256782", "0.4816751", "0.48116228", "0.48115733", "0.48115733", "0.48056406" ]
0.75825113
0
Spawning next generation of collection by selecting n pairs of distinct forests from previous generation and them over.
def _next_generation(self, previous_generation): self._fullInput, self._fullOutput = previous_generation.get_data() self.power = self.settings.population_count for forest_iteration in range(self.power): first, second = previous_generation.selection() print 'selected for crossover ->', first.fitness, second.fitness self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _next_gen(self):\n\n selected = self.select()\n offspring = self.population.mate(mating_individuals=selected)\n self.population.delete(np.arange(len(self.population.individuals)))\n self.population.add(offspring)\n self._current_gen_count += 1\n self._gen_count_in_curr_iteration += 1\n self._function_evaluation_count += offspring.shape[0]", "def next_generation(self, population):\n pass", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def nextGeneration(self):\n # select two parents from the current generation.\n parent_1 = self.selection()\n parent_2 = self.selection()\n # to not get the same parents.\n _ = 0\n while _ < 30 and parent_2 == parent_1:\n parent_2 = self.selection()\n _ += 1\n # apply crossover on those parents (crossover_rate chance).\n crossover_chance = random.uniform(0, 1)\n parents = [parent_1, parent_2]\n if crossover_chance <= self.crossoverRate:\n offspring = self.crossover(parents)\n else:\n return \n # apply mutations on the new offspring (mutation_rate chance).\n mutation_chance = random.uniform(0, 1)\n newoffspring = offspring\n if mutation_chance <= self.mutationRate:\n newoffspring = self.mutation(offspring)\n # replace one of the parents in the new generation, given the loser parent.\n self.replaceLoser(parents, newoffspring)\n\n # now the new generation is available in the self.currentGeneration", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def next_generation(self):\r\n self.calculate_stats()\r\n\r\n self.population = []\r\n\r\n # Getting amounts for different types of neural net replacements\r\n random_size = self.random_round(self.population_size * self.settings[\"random_offspring\"])\r\n elitism_size = self.random_round(self.population_size * self.settings[\"elitism_offspring\"])\r\n crossover_size = self.population_size - random_size - elitism_size\r\n\r\n # Keeping best neural nets (elitism)\r\n self.population.extend(self.sorted_population[i].copy() for i in range(elitism_size))\r\n\r\n # Adding neural nets with crossover\r\n\r\n probs = self._get_selection_probabilities()\r\n crossovers = (self._uniform_crossover(*np.random.choice(self.sorted_population, 2, replace=False, p=probs)) for _ in range(crossover_size))\r\n self.population.extend(crossovers)\r\n\r\n # Mutating neural nets\r\n for neural_net in self.population:\r\n if np.random.rand() < self.settings[\"mutation_rate\"]:\r\n neural_net.mutate(self.settings[\"mutation_chance\"], 
self.settings[\"mutation_amount\"])\r\n\r\n # Adding random nets\r\n self.population.extend(self._random_child() for _ in range(random_size))\r\n\r\n # Shuffling new population\r\n np.random.shuffle(self.population)\r\n\r\n # Increment current generation\r\n self.current_generation += 1", "def _next_generation(self, ranks):\n replace = ranks[:int(self.population_size * self.culling)]\n for idx in replace:\n self.population[idx] = self._create_offspring()", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def next_generation(self):\n new_population = self.population.copy()\n new_length = self.tour_length.copy()\n for i in range(self.loops):\n order_a = self.pick_one()\n order_b = self.pick_one()\n order = self.crossover(order_a, order_b)\n order_length = self.distance(order)\n new_population[i], new_length[i] = self.mutate(order_length, order)\n if new_length[i] < self.worst:\n self.tour_length[self.worst_pos] = new_length[i]\n self.population[self.worst_pos] = new_population[i]\n self.fitness[self.worst_pos] = 1/new_length[i]\n self.normalise()\n self.worst = 0\n for j in range(self.loops):\n if self.worst < self.tour_length[j]:\n self.worst = self.tour_length[j]\n self.worst_pos = j\n return new_population, new_length", "def generate_next_generation(environment, population, adaptive_mutation):\n\t# generate pairs of parents that can be used for recombination\n\tparent_pairs = parent_selection_ranking(population, num_pairs=len(population)*4)\n\n\t# generate offspring\n\toffspring = []\n\tfor i in range(len(parent_pairs)):\n\t\tchildren = create_offspring(environment, parent_pairs[i][0], parent_pairs[i][1], adaptive_mutation, num_offspring=1)\n\t\toffspring += children # concatenate children to offspring list\t\n\n\tnew_population = survival_selection_top(offspring, len(population))\n\treturn new_population", "def generation_next(prev_gen):\n next_gen = []\n\n # Iter through list of graphs\n for original_graph in prev_gen:\n # Select edges to nodes which are at distance 2\n select_edges = dist2_nodepairs(original_graph)\n\n # Go through the list of possible selected edges and add one\n for test_edge in select_edges:\n test_graph = original_graph.copy()\n test_graph.add_edge(*test_edge)\n if (not graph_exists(test_graph, next_gen)) \\\n and check_test_graph(test_graph):\n next_gen.append(test_graph)\n\n return next_gen", "def next_population():\n result = [best]\n while len(result) < population_size:\n chromosomes = crossover(tournament(), tournament()) if random() < crossover_rate else [tournament()]\n for chromosome in chromosomes:\n for i in range(box_count):\n if 
random() < mutation_rate:\n j = randrange(box_count)\n (chromosome[i], chromosome[j]) = (chromosome[j], chromosome[i])\n result.append(Individual(evaluate(chromosome), chromosome))\n return result[:population_size]", "def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]", "def create_next_gen(self, parents_sreprs_couple):\n child0, child1 = self.recombine(parents_sreprs_couple[0], parents_sreprs_couple[1])\n if random.random() < self.mutate_prob:\n child0 = self.mutate(child0)\n if random.random() < self.mutate_prob:\n child1 = self.mutate(child1)\n\n return child0, child1", "def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def _pop_random_n(entities: np.array, weights: np.array, count: int = 3):\n for _ in range(count):\n if not len(entities):\n return\n\n choice, entities, weights = _pop_random(entities, weights)\n yield choice", "def step(individuals, grammar, replacement, selection, fitness_function, best_ever):\n #Select parents\n parents = selection(individuals)\n #Crossover parents and add to the new population\n new_pop = []\n while len(new_pop) < GENERATION_SIZE:\n new_pop.extend(onepoint_crossover(*random.sample(parents, 2)))\n #Mutate the new population\n new_pop = list(map(int_flip_mutation, new_pop))\n #Evaluate the fitness of the new population\n evaluate_fitness(new_pop, grammar, fitness_function)\n #Replace the sorted individuals with the new populations\n individuals = replacement(new_pop, individuals)\n best_ever = max(best_ever, max(individuals))\n return individuals, best_ever", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def nth_iteration(Iterations, Moves_ahead, GA_iterations, n_samples,\n current_gen_spectra, next_gen_conc, x_test,\n conc_array_actual, spectra_array_actual, seed,\n median_fitness_list, max_fitness_list,\n iteration, mutation_rate_list, fitness_multiplier_list):\n set_seed(seed)\n mutation_rate, fitness_multiplier, best_move, best_move_turn, \\\n max_fitness, surrogate_score, desired_1, current_gen_spectra_1, \\\n best_conc_array, \\\n dictionary_of_moves = MCTS(Iterations, Moves_ahead,\n GA_iterations, current_gen_spectra,\n next_gen_conc, x_test, conc_array_actual,\n spectra_array_actual, seed, n_samples)\n print('The best move has a fitness value of', max_fitness)\n print('The best move occurs in', best_move_turn, 'turns.')\n print()\n print('The surrogate model has a score of:', surrogate_score)\n print()\n 
mutation_rate_list.append(mutation_rate)\n fitness_multiplier_list.append(fitness_multiplier)\n current_gen_spectra = current_gen_spectra.T\n current_gen_spectra = MinMaxScaler().fit(current_gen_spectra). \\\n transform(current_gen_spectra).T\n next_gen_conc, median_fitness, max_fitness = perform_iteration(\n current_gen_spectra, next_gen_conc, x_test, 20,\n n_samples, mutation_rate, fitness_multiplier)\n best_conc_array = \\\n best_conc_array[np.argsort(best_conc_array[:, -1])][-1, :]\n print(next_gen_conc)\n return mutation_rate, fitness_multiplier, mutation_rate_list, \\\n fitness_multiplier_list, best_move, best_move_turn, \\\n max_fitness, surrogate_score, next_gen_conc, \\\n best_conc_array, dictionary_of_moves", "def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n g1, g2, g3, cur, ind = 1, 2, 3, 0, 3\n if n < 3:\n return n\n else:\n while ind < n:\n cur = g3 + 2 * g2 + 3 * g1\n ind += 1\n g1, g2, g3 = g2, g3, cur\n return g3", "def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories", "def next_population(population, w, h, N):\r\n t = lambda entity: entity[1]\r\n population.sort(key=t, reverse=True)\r\n upper_half = population[:len(population)//2]\r\n mutated_half = []\r\n for entity in upper_half:\r\n new_entity = mutate(w, h, entity[0][:], 0.1) #0.1 as p seems to be pretty good in this config.\r\n new_entity = (new_entity[:], count_numbers(gen_board(w, h, new_entity), N))\r\n mutated_half.append(new_entity)\r\n return upper_half+mutated_half", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def cycles(n, support, randomize=False):\n support = np.array(support)\n\n def gen(p):\n g = combinations(support, n)\n if randomize:\n g = list(g)\n random.shuffle(g)\n\n for local_support in g:\n for output_p in all_permutations(local_support)(p):\n yield output_p\n\n return gen", "def getNextGeneration(self, chromosomes: ChromList) -> ChromList:\n parents = self.select(chromosomes)\n offspring = self.crossover(parents)\n offspring = self.mutate(offspring)\n return parents + offspring", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. 
generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')" ]
[ "0.66252106", "0.61852103", "0.61620736", "0.6136829", "0.61138487", "0.6082674", "0.6079343", "0.6030366", "0.6008962", "0.5985152", "0.5984775", "0.5952923", "0.58702976", "0.5806566", "0.57817847", "0.5779176", "0.57665956", "0.57347685", "0.57222146", "0.5704095", "0.5702277", "0.56958437", "0.564434", "0.5606501", "0.5602759", "0.55948955", "0.5594479", "0.55844074", "0.55707294", "0.55663884" ]
0.6936438
0
Executing every forest in collection, activating their networks. By the way collecting data about best fitness function.
def execute(self): process_list = [] forests_queue = Queue(self.power) iterational = 0 print '| |-starting evaluation, training and validation' for one_forest in self._forests: process_list.append( Process(target=main_async_method, args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings))) iterational += 1 for proc in process_list: proc.start() for proc in process_list: proc.join() for smth in range(forests_queue.qsize()): tmp = forests_queue.get() self._forests[tmp['place']].fitness = tmp['fitness'] fitness_summ = sum(map(lambda forest: forest.fitness, self._forests)) fss = map(lambda x: x.fitness, self._forests) print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss) self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)", "def learn(self):\n for a in self.agents:\n a.learn()", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def runner(self):\n\n print('[ INFO ]: Initializing the forest fires program runner...')\n\n df, features, predictor = self.preprocess()", "def run(self, verbose=False):\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n for i in range(self.max_iters):\n\n # prints out information at current cycle\n if verbose:\n print(\"Iteration: {}\".format(i),\n \"Fitness: {}\".format(self.forest[0][0]))\n\n # reproduction phase\n self.reproduce()\n\n # seed dispersal phase\n self.seedlings = []\n for tree in self.population:\n self.disperse(tree[1])\n tree[1].year += 1\n\n # selection phase\n self.select()\n\n # decays exploration parameters\n if (self.epsilon > 0):\n self.epsilon -= self.epsilon_decay\n\n # stores statistics and updates counter of iterations\n cost[\"best\"].append(self.population[0][0])\n cost[\"mean\"].append( sum( [ tree[0] for tree in self.population ] )\\\n / len(self.population) )\n self.iteration += 1\n\n return cost", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def apply_neurons(self):\n for neuron in range(self.n_outputs):\n self.uf_activate(neuron)", "def run_all(self):\n # print(\"running all nodes\")\n executed = set()\n node_update_states = {node: node.block_updates for node in 
self.flow_view.node_items}\n\n def traverse_upwards(node):\n # Traverse upwards to the top of data flow graph\n if node in executed:\n return\n for port in node.inputs:\n for connection in port.connections:\n traverse_upwards(connection.out.node)\n # print(\"executing\", node)\n node.update_event()\n executed.add(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = True\n\n for node in self.flow_view.node_items:\n traverse_upwards(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = node_update_states[node]\n # print(\"All nodes executed\")", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def run_all():\n db = DBInterface()\n year = Config.get_property(\"league_year\")\n session = Session(bind=db.engine)\n\n scraper.scrape_all(db, session, year)\n session.commit()\n\n bets.predict_all(db, session)\n session.commit()\n session.close()", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def run_evolutionary_generations(self):\n \n # Evolve the generation.\n for i in range(self.generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, self.generations))\n \n self.train_networks(self.networks)\n \n if self.is_classification:\n average_accuracy, highest_accuracy, lowest_accuracy, highest_scoring_network = self.get_accuracy_stats(self.networks) \n \n if highest_scoring_network is not None:\n highest_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_acc%f\" % (i, highest_accuracy)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_accuracy * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_accuracy * 100))\n logging.info('-'*80)\n else:\n average_loss, highest_loss, lowest_loss, best_scoring_network = self.get_loss_stats(self.networks) \n if best_scoring_network is not None:\n best_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_loss%f\" % (i, lowest_loss)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_loss * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_loss * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_loss * 100))\n logging.info('-'*80)\n # Evolve, except on the last iteration.\n if i != self.generations - 1:\n self.networks = self.optimizer.evolve(self.networks)\n \n self.save_network_objects(self.networks)\n \n if self.is_classification:\n self.networks = sorted(self.networks, key=lambda x: x.accuracy, reverse=True)\n else:\n self.networks = sorted(self.networks, key=lambda x: x.loss, reverse=False)\n \n self.print_networks(self.networks[:5])\n \n self.save_trained_network_models(self.dataset, self.networks[:5])", "def do_make_(self):\n global g_list_of_classifier\n\n for ite_clf in g_list_of_classifier:\n ite_clf.learn()\n return ''", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n 
clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + 
str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def run(self):\n for _ in range(self.epoch, conf.FX_MAX_EPOCHS):\n self.train()\n\n with torch.no_grad():\n self.test()\n\n self.epoch += 1\n self.save_ck()\n\n self.show_completion_msg()", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def forest(self):\n\n forest_parameters = [{'n_estimators': hel.powerlist(10, 2, 4),\n 'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1], 'n_jobs': [-1]}]\n forest_grid = GridSearchCV(estimator=RandomForestRegressor(),\n param_grid=forest_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n forest_grid_result = forest_grid.fit(self.X_train, self.y_train)\n best_forest_parameters = forest_grid_result.best_params_\n forest_score = forest_grid_result.best_score_\n print('Best forest params: ' + str(best_forest_parameters))\n print('Forest score: ' + str(forest_score))\n return RandomForestRegressor(\n n_estimators=best_forest_parameters['n_estimators'],\n min_samples_leaf=best_forest_parameters['min_samples_leaf'],\n criterion=best_forest_parameters['criterion'],\n random_state=1, n_jobs=-1)", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n 
self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return", "def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # train our network\n pi_v_losses = policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. 
Writing to empty file.')\n old_counts = {game.__class__.__name__: Counter() for game in self.games}\n else: ## load the counts from the file\n log.info('Loading counts...')\n f.seek(0)\n str_counts = f.read()\n # print('STRING OF JSON:', type(str_counts), str_counts)\n old_counts = json.loads(str_counts)\n old_counts = {game: Counter(v) for game, v in old_counts.items()}\n master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n # countiung logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n log.info('Writing...')\n f.truncate(0) #clear file\n json.dump(master_counts, f)\n log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def run(self, iterations):\n # print(f'Before:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome before: {self.best_genome.array}, fitness={self.best_genome.fitness} ')\n\n mutator = Rand1MutationOperator(self.population, self.bounds, 0.2)\n mixer = ExponentialCrossoverOperator(self.minfun)\n replacer = ElitistReplacementOperator()\n\n for _ in range(iterations):\n candidate_population = Population(None, None, 0)\n for target in self.population.collection:\n # List with genomes who will be the donors\n mutant = mutator.apply(target)\n # Genome modified by replacing a few random positions\n candidate_genome = mixer.apply(target, mutant)\n\n candidate_population.add(candidate_genome)\n\n # Targets are replaced by candidates from the population if candidate has less fitness than target\n self.population = replacer.apply(self.population, candidate_population)\n\n # print(f'After:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome after: {self.best_genome.array}, fitness={self.best_genome.fitness} ')", "def loadall(bot) :\n for feature in features :\n load(bot, feature)" ]
[ "0.66012484", "0.62504995", "0.61932045", "0.6135642", "0.6134355", "0.61263996", "0.6049594", "0.5867963", "0.5865974", "0.5832648", "0.581788", "0.5802029", "0.57808244", "0.5779419", "0.5708533", "0.56901664", "0.5634386", "0.56181246", "0.558044", "0.553923", "0.55354095", "0.55076337", "0.5501047", "0.546172", "0.54554665", "0.5449654", "0.54465944", "0.5430582", "0.54302466", "0.5425708" ]
0.7236054
0
Just mutating every forest in collection.
def mutate(self): for forest in self._forests: forest.mutate(self._fullInput)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()", "def update(self):\n map(lambda x: x.update(), self._children.values())", "def update (self) :\n for met in self.gene :\n met(self)", "def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def update(self, iterable):\n self._update_nodes(iterable)", "def _reset(base: pymongo.database.Database) -> None:\n if base:\n for collection in base.list_collection_names():\n _reset_collection(base, collection)", "def resetWeights(T):\n T.children = [(t,0) for t in T.children]\n for t,w in T.children:\n resetWeights(t)", "def reassignWeights(self,weights):\n\t\n\t\tbranches = self.collectAllBranches()\n\n\t\tfor i in range(self.nBranches):\n\n\t\t\tbranches[i].weight = weights[i]", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def move_to_collection(self, destination_collection):\n for entity in self:\n entity.move_to_collection(destination_collection)", "def reset(self):\n for c in self.children:\n c.reset()\n self.marked = False", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. \"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()", "def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set", "def sync_territories(self):\n for territory_state in self.territory.all():\n territory_state.sync()", "def update(self) -> None:\n\t\t# Clear attributes that will be updates\n\t\tself.node_names: List[str] = []\n\t\tself.subnode_names: Dict[str, Set[str]] = {}\n\t\t# Iterate over RootNodes\n\t\tname: str\n\t\ts_name: str\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over Nodes\n\t\t\tfor node in rootnode.nodes:\n\t\t\t\tself._update_with_node(node)\n\t\t\tif len(rootnode.subnodes):\n\t\t\t\t# Create Set in subnode_names for the RootNode's SubNodes\n\t\t\t\tself.subnode_names[rootnode.name] = set()\n\t\t\t\t# Iterate over SubNodes\n\t\t\t\tfor subnode in rootnode.subnodes:\n\t\t\t\t\tself.subnode_names[rootnode.name].add(subnode.name)", "def update_all_readings(self):\n\n # update the reading of all nodes\n for node_name in self.nodes:\n\n # update the readings of 
all nodes\n self.nodes[node_name].reading()\n\n # once all nodes have updated, they can be stabilized\n for node_name in self.nodes:\n\n self.nodes[node_name].stabilize()", "def _mutate(self, p_mutate, mutation):\n self.children = mutation(self.children, p_mutate)", "def update_all(self, request):\n\n schema = self.session.info['schema']\n\n for item in self.query().filter_by(schema=schema):\n self.session.delete(item)\n\n for item in ElectionCollection(self.session).query():\n self.update(item, request)\n\n for item in ElectionCompoundCollection(self.session).query():\n self.update(item, request)\n\n for item in VoteCollection(self.session).query():\n self.update(item, request)", "def grow_trees(self, regrow=False):\n if self.forest == [] or regrow:\n mtry = int(math.floor(math.sqrt(len(self.variables))))\n data, trees, var, pred_index = self.data, self.trees, self.variables, self.prediction_index\n attr_fn, dist_classes, order, imp = self.attr_fn, self.dist_classes, len(self.data), self.importance_fn\n self.forest = random_forest.RandomForest(data, trees, mtry, var, pred_index, attr_fn, dist_classes, order, imp)\n print self.trees, ' have been grown using a set of ', len(self.variables), ' variables.'\n else:\n print \"Already a forest in place, add regrow=True to override.\"", "def unselectAll(self):\n\t\tself.tree.UnselectAll()", "def setSubtreeBF(self, index, subtree):\n if index == 0:\n try:\n self[:] = subtree\n except TypeError:\n del self[1:]\n self[0] = subtree\n return\n \n queue = deque(izip(repeat(self, len(self[1:])), count(1)))\n for i in xrange(index):\n elem = queue.popleft()\n parent = elem[0]\n child = elem[1]\n if isinstance(parent[child], Tree):\n tree = parent[child]\n queue.extend(izip(repeat(tree, len(tree[1:])), count(1)))\n parent[child] = subtree", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def randomize(self):\n for network in self.networks.values():\n network.database = []\n self.env = Environment(self.networks)", "def copy_many_to_temp(self,\r\n sourcerange=None):\r\n\r\n if sourcerange is None:\r\n sourcerange = []\r\n\r\n for a_temp in sourcerange:\r\n\r\n self.copy_to_temp(a_temp,\r\n self.tempobject)", "def copy_relations(self, oldinstance):\n for image in oldinstance.images.all():\n image.pk = None\n image.gallery = self\n image.save()", "def _update(self, features: DataFrameLike) -> None:\n # add features\n self._features = (\n pd.concat([self._features, features], axis=1, sort=True)\n # fill nans resulting from concatenation where features does not\n # contain neighborless nodes (out-degree=0) on its axis\n .fillna(0)\n )\n # prune redundant features\n pruner = FeaturePruner(self._final_features, self._feature_group_thresh)\n features_to_drop = pruner.prune_features(self._features)\n self._features = self._features.drop(features_to_drop, axis=1)\n # save features that remain after pruning and that\n # have not previously been saved as final features\n retained = features.columns.difference(features_to_drop)\n feature_dict = as_frame(self._features[retained]).to_dict()\n self._final_features[self.generation_count] = feature_dict", "def reset_bag(self):" ]
[ "0.59149694", "0.5712289", "0.54862785", "0.54687923", "0.5425373", "0.54242694", "0.5323184", "0.5317251", "0.52714866", "0.5256046", "0.525473", "0.524264", "0.52372867", "0.5176812", "0.51745", "0.5156172", "0.51512945", "0.51447666", "0.50903946", "0.5075218", "0.5069291", "0.50478506", "0.5037829", "0.5032198", "0.5025653", "0.5021753", "0.5008239", "0.50028604", "0.5002316", "0.50009376" ]
0.7783784
0
SGL query of a traffic sensor. See query_ensor() for sensorURI, fromTime and toTime
def get_traffic_sensor_df(sensorURI: str, fromTime: str, toTime: str, resampleFreq: str = None, remove_outliers=False): values = ["count", "sumSpeed"] result = None for v in values: # data = query_ensor(sensorURI, fromTime, toTime, v) data = multiday_query(sensorURI, fromTime, toTime, v) df = pd.DataFrame(data, columns=["measuredTime", v]) df["measuredTime"] = pd.to_datetime(df["measuredTime"]) df.index = df["measuredTime"] del df["measuredTime"] if remove_outliers: z_scores = np.abs(stats.zscore(df)) print(f"Removed outliers: {df.size - df[(z_scores < 3).all(axis=1)].size}") df = df[(z_scores < 3).all(axis=1)] if resampleFreq is not None: df = df.resample(resampleFreq).sum() if result is not None: result = pd.merge_ordered(result, df, left_on="measuredTime", right_on="measuredTime") result.index = result["measuredTime"] del result["measuredTime"] else: result = df # avg speed result["avgSpeed"] = result["sumSpeed"] / result["count"] result.loc[~np.isfinite(result["avgSpeed"]), "avgSpeed"] = np.nan result["avgSpeed"] = result["avgSpeed"].interpolate() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_ensor(sensorURI, fromTime, toTime, valueName):\n\n s = f\"https://smartgardalake.snap4.eu/ServiceMap/api/v1/?serviceUri={sensorURI}&fromTime={fromTime}&toTime={toTime}&valueName={valueName}\"\n print(s)\n response = requests.get(s)\n data = response.json()\n values = []\n try:\n values = data[\"realtime\"][\"results\"][\"bindings\"]\n except KeyError:\n print(\"[WARN] empty dataset\")\n values.reverse()\n result = {\n \"measuredTime\": [],\n valueName: [],\n }\n print(len(values))\n for i in range(len(values)):\n v = values[i]\n result[\"measuredTime\"].append(v[\"measuredTime\"][\"value\"])\n try:\n float_measure = float(v[valueName][\"value\"])\n if valueName == \"CO2\" and float_measure > 2000:\n result[valueName].append(np.nan)\n else:\n result[valueName].append(float_measure)\n except ValueError:\n result[valueName].append(np.nan)\n return result", "def read_sensors():\n previous_time = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n delta = now - previous_time\n if delta.seconds >= sample_frequency:\n previous_time = now\n \n # Read SGP30.\n eCO2_data = sgp30.eCO2\n tvoc_data = sgp30.TVOC\n\n # Read VEML6070 and VEML7700, sample ten times.\n for j in range(10):\n light_data = light.lux\n uv_raw = uv.uv_raw\n uv_data = uv.get_index(uv_raw)\n\n # Read BME280.\n temp_data = bme280.temperature\n # Convert temperature (C->F)\n temp_data = temp_data * 1.8 + 32\n humid_data = bme280.humidity\n pressure_data = bme280.pressure\n\n # Write to database\n conn = sqlite3.connect(db)\n curs = conn.cursor()\n curs.execute(\"INSERT INTO data values(?, ?, ?, ?, ?, ?, ?, ?)\",\n (now, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data,\n light_data, uv_data))\n conn.commit()\n conn.close()", "def get_all_sensors():\n\tquery_url = 'http://localhost:8079/api/query'\n\tquery = \"select *\"\n\tr = requests.post(query_url, query)\n\treturn r.content", "def sensor_history(self, sensor_name, start_time_sec, end_time_sec,\n include_value_ts=False, timeout_sec=0):\n\n if timeout_sec != 0:\n self._logger.warn(\n \"timeout_sec is no longer supported. 
Default tornado timeout is used\")\n\n params = {\n 'sensor': sensor_name,\n 'start_time': start_time_sec,\n 'end_time': end_time_sec,\n 'limit': MAX_SAMPLES_PER_HISTORY_QUERY,\n 'include_value_time': include_value_ts\n }\n\n url = url_concat(\n (yield self.get_sitemap())['historic_sensor_values'] + '/query', params)\n self._logger.debug(\"Sensor history request: %s\", url)\n response = yield self._http_client.fetch(url)\n data_json = json.loads(response.body)\n if 'data' not in data_json:\n raise SensorHistoryRequestError(\"Error requesting sensor history: {}\"\n .format(response.body))\n data = []\n for item in data_json['data']:\n if 'value_time' in item:\n sample = SensorSampleValueTime(item['sample_time'],\n item['value_time'],\n item['value'],\n item['status'])\n else:\n sample = SensorSample(item['sample_time'],\n item['value'],\n item['status'])\n data.append(sample)\n result = sorted(data, key=_sort_by_sample_time)\n raise tornado.gen.Return(result)", "def get_sensor(userid, deviceid, sensorid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid))\n return make_response(sensor_response.content, sensor_response.status_code)", "def get_sensors(userid, deviceid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors\", json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)", "def query(monitorPoint) :\n return s.query(monitorPoint)", "def sensors():\n sensor_data = query_db('SELECT * FROM sensors')\n return jsonify(results=sensor_data)", "def get_sondes(client, start, end):\n\n sonde_query_str = \"SELECT * FROM cfog.sharp_radiosonde \" + \\\n f\"WHERE LaunchTime BETWEEN '{start}' AND '{end}' \" + \\\n \"ORDER BY LaunchTime ASC\"\n\n print(f\"Executing bigquery query string: \")\n print(sonde_query_str + '\\n')\n\n sonde_data = {f\"{s['LaunchTime'].strftime('%m-%d_%H')}\":s for s in client.query(query=sonde_query_str)}\n\n print(\"Radiosondes obtained within the queried time bounds: \")\n print(list(sonde_data))\n\n sonde_data_out = {}\n for t in sonde_data:\n # ignored col: SoundingIdPk, RadioRxTimePk, PtuStatus\n sonde_data_out[t] = {}\n sonde_data_out[t]['df'] = pd.DataFrame({\n 'DataSrvTime' : sonde_data[t]['DataSrvTime'],\n 'Pressure' : sonde_data[t]['Pressure'],\n 'Temperature' : sonde_data[t]['Temperature'],\n 'Humidity' : sonde_data[t]['Humidity'],\n 'WindDir' : sonde_data[t]['WindDir'],\n 'WindSpeed' : sonde_data[t]['WindSpeed'],\n 'WindNorth' : sonde_data[t]['WindNorth'],\n 'WindEast' : sonde_data[t]['WindEast'],\n 'Height' : sonde_data[t]['Height'],\n 'WindInterpolated' : sonde_data[t]['WindInterpolated'],\n 'Latitude' : sonde_data[t]['Latitude'],\n 'Longitude' : sonde_data[t]['Longitude'],\n 'North' : sonde_data[t]['North'],\n 'East' : sonde_data[t]['East'],\n 'Up' : sonde_data[t]['Up'],\n 'Altitude' : sonde_data[t]['Altitude'],\n 'Dropping' : sonde_data[t]['Dropping']\n }\n )\n sonde_data_out[t]['LaunchTime'] = sonde_data[t]['LaunchTime']\n sonde_data_out[t]['LaunchLatitude'] = sonde_data[t]['LaunchLatitude']\n sonde_data_out[t]['LaunchLongitude'] = sonde_data[t]['LaunchLongitude']\n\n print(f\"Query complete. 
Total number of data entries: {len(sonde_data_out)}.\\n\\n\")\n\n del sonde_data\n return sonde_data_out", "def get_data(last):\n Table = \"ServerRoom\"\n filter = \"\"\n if last == \"lastone\":\n data = request_meteodata(\"SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 \")\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != \"All\":\n limit = datetime.datetime.now().astimezone(utz)\n if last == \"24hours\":\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == \"3days\":\n limit -= datetime.timedelta(days=3)\n elif last == \"7days\":\n limit -= datetime.timedelta(days=7)\n elif last == \"month\":\n limit = limit.replace(day=1)\n elif last == \"30days\":\n limit -= datetime.timedelta(days=30)\n elif last == \"year\":\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = \" ORDER BY `date` ASC\"\n req = \"SELECT * FROM `\" + Table + \"`\" + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print(\"no data: get all\")\n req = \"SELECT * FROM `\" + Table + \"`\" + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res", "def _poll_sensors(conn, cursor):\n conn, c = _get_db_connection()\n\n motion_reading = catnanny.motionsensor()\n temp_reading = catnanny.tempreading()\n\n current_timestamp = datetime.now().isoformat()\n # insert a timestamp, the word motion, and the output from catnanny.motionsensor into sensor_data\n c.execute(\"\"\"INSERT INTO sensor_data VALUES (?, ?, ?)\"\"\", (current_timestamp, 'motion', motion_reading))\n # insert a timestamp, the word temperature, and the output from catnanny.tempreading into sensor_data\n c.execute(\"\"\"INSERT INTO sensor_data VALUES (?, ?, ?)\"\"\", (current_timestamp, 'temperature', temp_reading))\n\n conn.commit()", "def GEEviLandsat(ptsFile,metric,timeStep,sensor,buf,poly,username,folderOut, scalePix = 30):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define dictionary for raster random names\n sensor_d = {}\n sensor_d['L4'] = 'LANDSAT/LT04/C01/T1_SR'\n sensor_d['L5'] = 'LANDSAT/LT05/C01/T1_SR'\n sensor_d['L7'] = 'LANDSAT/LE07/C01/T1_SR'\n sensor_d['L8'] = 'LANDSAT/LC08/C01/T1_SR'\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n \n \n #Computes the bits we need to extract.\n def getQABits(image, start, end, newName):\n pattern = 0\n listB = list(range(start, end+1))\n for one in listB:\n pattern += math.pow(2, one)\n pattern = int(pattern)\n \n return (image.select([0], [newName])\n .bitwiseAnd(pattern)\n .rightShift(start))\n \n for sen in sensor:\n LS = ee.ImageCollection(sensor_d[sen])\n #senL = [sen]\n \n def maskbyBits(img):\n QA = img.select('pixel_qa')\n QA1 = getQABits(QA, 3, 3, 'QA')\n QA2 = getQABits(QA, 5, 5, 'QA')\n\n mask = QA1.eq(0).And(QA2.eq(0))\n return img.updateMask(mask)\n \n LSm = LS.map(maskbyBits)\n \n lastImage = ee.Image(ee.ImageCollection(sensor_d[sen])\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = 
ee.Image(ee.ImageCollection(sensor_d[sen])\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n \n startYear = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYear = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)])\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n startYearAll = startYear + 1\n endYearAll = endYear - 1\n \n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n for met in metric:\n # metL = [met]\n\n if (sen == 'L8' and met == \"NDVI\"):\n bands = ['B5', 'B4']\n elif (sen != 'L8' and met == \"NDVI\"):\n bands = ['B4', 'B3']\n elif (sen == 'L8' and met == \"NDWI\"):\n bands = ['B5', 'B6']\n elif (sen != 'L8' and met == \"NDWI\"):\n bands = ['B4', 'B5']\n elif (sen == 'L8' and met == \"NBR\"):\n bands = ['B5', 'B7']\n elif (sen != 'L8' and met == \"NBR\"):\n bands = ['B4', 'B7']\n #else:\n #print(\"wrong metric specified\")\n \n def addVI(image):\n vi = (image.normalizedDifference(bands)\n .rename('VI'))\n return image.addBands(vi)\n\n withVI = LSm.map(addVI)\n\n VI_col = withVI.select('VI')\n\n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (VI_col\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (VI_col\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (VI_col\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (VI_col\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif timeStep == 'lowest':\n\n img_col = VI_col\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for Landsat: ' + sen + '_' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return 
table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for Landsat: ' + sen + '_' + met)\n\n else:\n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for Landsat: ' + sen + '_' + met)", "def describeSensor(self, graph, uri):\n ret = []\n for p, o in graph.query(\"SELECT ?p ?o WHERE { ?uri ?p ?o }\",\n initBindings=dict(uri=uri)):\n if p in [RDFS.label, RDF.type]:\n continue\n ret.append('%s: <span class=\"value\">%s</span>' % (\n linked(graph.label(p), p),\n linked(graph.label(o), o) if isinstance(o, URIRef) else\n cgi.escape(o)))\n return '; '.join(ret)", "def electricity(osm_path): \n return retrieve(osm_path,'lines',['power','voltage'],**{'voltage':[\" IS NULL\"],})", "def sensor_values(self, filters, components=None, include_value_ts=False):\n if isinstance(components, list):\n components = \",\".join(components)\n else:\n components = \"all\"\n\n url = (yield self.get_sitemap())['monitor'] + '/list-sensors/' + components\n\n if isinstance(filters, basestring):\n filters = [filters]\n\n results_to_return = {}\n\n for filt in filters:\n query_url = url_concat(url, {\"reading_only\": \"1\", \"name_filter\": filt})\n response = yield self._http_client.fetch(query_url)\n try:\n results = json.loads(response.body)\n except ValueError:\n raise InvalidResponseError(\n \"Request to {} did not respond with valid JSON\".format(url))\n\n if len(results) == 0:\n raise SensorNotFoundError(\"No values for filter {} found\".format(filt))\n\n for result in results:\n if include_value_ts:\n results_to_return[result['name']] = SensorSampleValueTime(\n sample_time=result['time'],\n value_time=result['value_ts'],\n value=result['value'],\n status=result['status'])\n else:\n results_to_return[result['name']] = SensorSample(\n sample_time=result['time'],\n value=result['value'],\n status=result['status'])\n\n raise tornado.gen.Return(results_to_return)", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])", "def main(temp, humid):\n user = 'root'\n password = 'root'\n dbname = 'iot'\n dbuser = 'raspberry'\n dbuser_password = 'password'\n query = 'select temp_value,humid_value from temp_humid;'\n json_body = [\n {\n \"measurement\": \"temp_humid\",\n \"fields\": {\n \"temp_value\": temp,\n \"humid_value\":humid \n\t}\n }\n ]\n\n client = InfluxDBClient('localhost', 8086, user, password, dbname)\n\n #client.create_database(dbname)\n\n print(\"Write points: 
{0}\".format(json_body))\n client.write_points(json_body)\n\n #print(\"Querying data: \" + query)\n #result = client.query(query)\n\n #print(\"Result: {0}\".format(result))\n\n #client.drop_database(dbname)", "def _latest_sensor_glucose_entry_in_range(from_datetime, to_datetime):\n glucose_pages_dict = json.loads(\n _pump_output(\n \"filter_glucose_date\",\n from_datetime.isoformat(),\n to_datetime.isoformat()\n )\n )\n last_page = glucose_pages_dict[\"end\"]\n glucose_history = json.loads(_pump_output(\"read_glucose_data\", str(last_page)))\n glucose_iterator = (x for x in reversed(glucose_history) if x[\"name\"] in (\"GlucoseSensorData\",\n \"CalBGForGH\"))\n\n last_datetime = to_datetime\n\n while from_datetime <= last_datetime:\n try:\n glucose_dict = next(glucose_iterator)\n except StopIteration:\n break\n\n last_datetime = parse(glucose_dict[\"date\"])\n amount = glucose_dict.get(\"sgv\", glucose_dict.get(\"amount\", 0))\n if amount > 0 and from_datetime <= last_datetime <= to_datetime:\n return glucose_dict", "def get(self):\n if now()-self.last_query < 1./self.query_rate:\n return None,None\n self.last_query = now()\n\n # query from saver (an old strategy that may be desired at points): \n #self.saver.query_flag.value = True\n #fr = mp2np(self.saver.query_queue)\n #frts = self.saver.query_queue_ts.value\n \n # query from _PSEye (a newer strategy that is preferable for most uses):\n self.pseye.query_flag.value = True\n while self.pseye.query_flag.value == True:\n pass\n fr = self.pseye.query_queue[0]\n frts = self.pseye.query_queue_ts.value\n\n x,y = self.resolution[self.query_idx]\n return frts,fr.reshape([y,x])", "def update(self):\n url = 'https://airapi.airly.eu/v2/measurements/point' \\\n '?lat={}&lng={}&maxDistanceKM=2'.format(self._latitude,\n self._longitude)\n headers = {'Accept': CONTENT_TYPE_JSON, 'apikey': self._token}\n request = requests.get(url, headers=headers)\n _LOGGER.debug(\"New data retrieved: %s\", request.status_code)\n if request.status_code == HTTP_OK and request.content.__len__() > 0:\n if (request.json()['current']['indexes'][0]['description'] ==\n ATTR_NO_SENSOR_AVAILABLE):\n _LOGGER.error(ATTR_NO_SENSOR_AVAILABLE)\n else:\n self.get_data(request.json())", "def filter_sensor_data(self,request):\n\n data = QueryDict.dict(request.data)\n if data[\"value\"]==\"min\":\n sensor_obj = SensorData.objects.filter( reading_date__gte = data[\"start_date\"],\n reading_date__lte = data[\"end_date\"])\n data_value = sensor_obj.aggregate(Min('reading'))\n data_value = data_value[\"reading__min\"]\n elif data[\"value\"]==\"max\":\n sensor_obj = SensorData.objects.filter(reading_date__gte=data[\"start_date\"],\n reading_date__lte=data[\"end_date\"])\n data_value = sensor_obj.aggregate(Max('reading'))\n data_value = data_value[\"reading__max\"]\n elif data[\"value\"]==\"average\":\n sensor_obj = SensorData.objects.filter(reading_date__gte=data[\"start_date\"],\n reading_date__lte=data[\"end_date\"])\n data_value = sensor_obj.aggregate(Avg('reading'))\n data_value = data_value[\"reading__avg\"]\n serializer = SensorDataSerializer(sensor_obj, many=True)\n context = {\n \"value\":data_value,\n \"data_list\":serializer.data\n }\n return Response(context, 200)", "def get_datapoints(self, rid, t0, t1, nmax = 300):\n self.read_curs.execute(\"SELECT COUNT(*) FROM readings WHERE readout_id = ? AND time >= ? AND time <= ?\", (int(rid), t0, t1))\n if self.read_curs.fetchone()[0] > nmax:\n self.read_curs.execute(\"SELECT avg(time),avg(value) FROM readings WHERE readout_id = ? 
AND time >= ? AND time <= ? GROUP BY round(time/?) ORDER BY time DESC\", (int(rid), t0, t1, (t1-t0)/nmax));\n else:\n self.read_curs.execute(\"SELECT time,value FROM readings WHERE readout_id = ? AND time >= ? AND time <= ? ORDER BY time DESC\", (int(rid), t0, t1))\n return self.read_curs.fetchall()", "def test_get_measurement_history(self):\n device = DeviceFactory(node=Node.objects.first(), external_id='123', type__code=SecureDeviceType.SRT321,\n device_param__type__code=SecureDeviceParameterType.MEASURED_TEMPERATURE)\n d_id_1 = device.external_id\n\n now_loc = datetime.datetime.now(bst)\n ts_loc = now_loc - datetime.timedelta(seconds=30)\n ts_str = ts_loc.strftime('%Y-%m-%dT%H:%M:%S')\n\n data = self.create_secure_server_push_data(d_id_1, ts_str)\n\n SecureClient.process_push_data(data)\n time.sleep(.5)\n\n # get newer timestamp\n ts_str = now_loc.strftime('%Y-%m-%dT%H:%M:%S')\n data = self.create_secure_server_push_data(d_id_1, ts_str, value=\"23.5\")\n\n SecureClient.process_push_data(data)\n\n token = Token.objects.get(user__username=email)\n device_param = device.parameters.first()\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n url = reverse('api:device_measurements', kwargs={'device_parameter_id': device_param.id})\n\n time.sleep(.5)\n\n response = client.get(url, format='json')\n\n self.assertTrue(response.status_code == 200)\n self.assertTrue(len(response.data) >= 2)", "def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):\n index_end = len(sensor_window)\n index_start = index_end - self._obs_history\n self._q_ = np.array([sensor_window[i]['q_actual'][0] for i in range(index_start,index_end)])\n self._qt_ = np.array([sensor_window[i]['q_target'][0] for i in range(index_start,index_end)])\n self._qd_ = np.array([sensor_window[i]['qd_actual'][0] for i in range(index_start,index_end)])\n self._qdt_ = np.array([sensor_window[i]['qd_target'][0] for i in range(index_start,index_end)])\n self._qddt_ = np.array([sensor_window[i]['qdd_target'][0] for i in range(index_start,index_end)])\n\n self._current_ = np.array([sensor_window[i]['i_actual'][0] for i in range(index_start,index_end)])\n self._currentt_ = np.array([sensor_window[i]['i_target'][0] for i in range(index_start,index_end)])\n self._currentc_ = np.array([sensor_window[i]['i_control'][0] for i in range(index_start,index_end)])\n self._mt_ = np.array([sensor_window[i]['m_target'][0] for i in range(index_start,index_end)])\n self._voltage_ = np.array([sensor_window[i]['v_actual'][0] for i in range(index_start,index_end)])\n\n\n self._safety_mode_ = np.array([sensor_window[i]['safety_mode'][0] for i in range(index_start,index_end)])\n\n #TODO: should there be checks for safety modes greater than pstop here, and exit if found?\n\n # Compute end effector position\n x = ur_utils.forward(sensor_window[-1]['q_actual'][0], self._ik_params)[:3, 3]\n np.copyto(self._x_, x)\n\n if self._target_type == 'position':\n self._target_diff_ = self._x_[self._end_effector_indices] - self._target_\n elif self._target_type == 'angle':\n self._target_diff_ = self._q_[-1, self._joint_indices] - self._target_\n\n self._reward_.value = self._compute_reward_()\n if self._reward_type == \"sparse\":\n done = self._reward_.value >= 0\n else:\n done = 0\n # TODO: use the correct obs that matches the observation_space\n return np.concatenate((self._q_[:, self._joint_indices].flatten(),\n self._qd_[:, self._joint_indices].flatten() / self._speed_high,\n self._target_diff_,\n self._action_ / 
self._action_high,\n [self._reward_.value],\n [done]))", "def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)", "def sensor(self):\n return ProxyList(self, OxfordITC503.Sensor, range(3))", "def query_radar_data(station,product,start,\n minute_delta=0,hour_delta=0,day_delta=0):\n \n end = start+timedelta(days=day_delta, minutes=minute_delta, hours=hour_delta)\n \n print(f\"query start time:{start}\")\n print(f\"query end time:{end}\")\n rs = RadarServer('http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/')\n query = rs.query()\n rs.validate_query(query)\n print(rs.stations[station])\n\n query.stations(station).time_range(start,end).variables(product)\n catalog = rs.get_catalog(query)\n file_station = str(catalog.datasets[0])\n file_station = file_station[0:4]\n \n file_list = list(catalog.datasets.values())\n for t in file_list: print(t)\n LatLonBox = [rs.stations[station].longitude-3,rs.stations[station].longitude+3,\n rs.stations[station].latitude-2,rs.stations[station].latitude+2]\n \n return file_list,LatLonBox", "async def read(self, sensors):\n\n try:\n timeout = aiohttp.ClientTimeout(total=5)\n async with aiohttp.ClientSession(timeout=timeout,\n raise_for_status=True) as session:\n current_url = self.url_info\n async with session.get(current_url) as response:\n data = await response.text()\n\n if self.wifi:\n csv_data = StringIO(data)\n reader = csv.reader(csv_data)\n\n for row in reader:\n self.serialnumber = row.pop(0)\n else:\n xml = ET.fromstring(data)\n\n find = xml.find(\"SN\")\n if find is not None:\n self.serialnumber = find.text\n\n _LOGGER.debug(\"Inverter SN: %s\", self.serialnumber)\n\n current_url = self.url\n async with session.get(current_url) as response:\n data = await response.text()\n at_least_one_enabled = False\n\n if self.wifi:\n csv_data = StringIO(data)\n reader = csv.reader(csv_data)\n ncol = len(next(reader))\n csv_data.seek(0)\n\n values = []\n\n for row in reader:\n for (i, v) in enumerate(row):\n values.append(v)\n\n for sen in sensors:\n if ncol < 24:\n if sen.csv_1_key != -1:\n try:\n v = values[sen.csv_1_key]\n except IndexError:\n v = None\n else:\n v = None\n else:\n if sen.csv_2_key != -1:\n try:\n v = values[sen.csv_2_key]\n except IndexError:\n v = None\n else:\n v = None\n\n if v is not None:\n if sen.name == \"state\":\n sen.value = MAPPER_STATES[v]\n else:\n sen.value = eval(\n \"{0}{1}\".format(v, sen.factor)\n )\n sen.date = date.today()\n sen.enabled = True\n at_least_one_enabled = True\n else:\n xml = ET.fromstring(data)\n\n for sen in sensors:\n find = xml.find(sen.key)\n if find is not None:\n sen.value = find.text\n sen.date = date.today()\n sen.enabled = True\n at_least_one_enabled = True\n\n if not at_least_one_enabled:\n if self.wifi:\n raise csv.Error\n else:\n raise ET.ParseError\n\n if sen.enabled:\n _LOGGER.debug(\"Got new value for sensor %s: %s\",\n sen.name, sen.value)\n\n return True\n except (aiohttp.client_exceptions.ClientConnectorError,\n concurrent.futures._base.TimeoutError):\n # Connection to inverter not possible.\n # This can be \"normal\" - so warning instead of error - as SAJ\n # inverters are powered by DC and thus have no power after the sun\n # has set.\n _LOGGER.warning(\"Connection to SAJ inverter is not possible. \" +\n \"The inverter may be offline due to darkness. 
\" +\n \"Otherwise check host/ip address.\")\n return False\n except aiohttp.client_exceptions.ClientResponseError as err:\n # 401 Unauthorized: wrong username/password\n if err.status == 401:\n raise UnauthorizedException(err)\n else:\n raise UnexpectedResponseException(err)\n except csv.Error:\n # CSV is not valid\n raise UnexpectedResponseException(\n str.format(\"No valid CSV received from {0} at {1}\", self.host,\n current_url)\n )\n except ET.ParseError:\n # XML is not valid or even no XML at all\n raise UnexpectedResponseException(\n str.format(\"No valid XML received from {0} at {1}\", self.host,\n current_url)\n )", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def fetch_data_from_db(sensorName):\n connection = sqlite3.connect('sensordata.db')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM sensordata WHERE name = :name\", {'name': sensorName})\n observedsensor = cursor.fetchall()\n return observedsensor" ]
[ "0.78036326", "0.56300086", "0.56219053", "0.54439676", "0.5414489", "0.5369397", "0.53085774", "0.5199531", "0.5161808", "0.51520276", "0.5134034", "0.5122384", "0.510574", "0.50754833", "0.50372785", "0.5034987", "0.5008669", "0.50082725", "0.5000846", "0.49865463", "0.49745566", "0.49654335", "0.49633363", "0.49038783", "0.49025807", "0.49005684", "0.48986858", "0.489094", "0.48893544", "0.48811346" ]
0.59702265
1
Plot time points given in data file and compare to x3
def plot_data(fname):
    if not os.path.isfile(fname):
        print('No data has been generated yet, aborting...')
        sys.exit(1)

    with open(fname, 'r') as fd:
        data = json.load(fd)

    x = np.arange(0, max(data, key=lambda e: e[0])[0], 1)
    const = .55e-8
    func = lambda x: const * x**3

    plt.plot(
        *zip(*data),
        label=r'ShRec3D data points',
        linestyle='None', marker='h'
    )
    plt.plot(x, func(x), label=r'$ %.0e \cdot x^3$' % const)

    plt.title(r'Complexity ($\in \Theta\left(x^3\right)$) visualization of ShRec3D')
    plt.xlabel('loci number')
    plt.ylabel('execution time (seconds)')
    plt.legend(loc='best')

    plt.savefig('time_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def _figure_3():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n cdata = data[:, 33]\n seconds = np.arange(data.shape[0]) * 1. / 250\n\n plt.xlim(right=seconds[-1])\n plt.plot(seconds, cdata, color='black', linestyle=':')\n plt.ticklabel_format(useOffset=False)\n plt.xlabel('Second')\n plt.ylabel('Microstrain')\n plt.savefig('Figure3.png', dpi=300)\n plt.gcf().clear()", "def plot_three(estacion,formato):\n global num_ticks\n\n if formato == 'vladi':\n ruta='/home/oscar/Doctorado/GPS/programas/python/datos_vladi/completos/'\n ns_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat1'\n ew_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat2'\n up_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat3'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[3:]\n ew_datos=ew_archivo.readlines()[3:]\n up_datos=up_archivo.readlines()[3:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'sara':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_sara/'\n ns_file = ruta + estacion.upper() + '/lat.' + estacion.lower() + '.dat'\n ew_file = ruta + estacion.upper() + '/long.' + estacion.lower() + '.dat'\n up_file = ruta + estacion.upper() + '/height.' 
+ estacion.lower() + '.dat'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()\n ew_datos=ew_archivo.readlines()\n up_datos=up_archivo.readlines()\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'cabral':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_enrique_cabral/'\n ns_file = ruta + 'north_' + estacion.upper()\n ew_file = ruta + 'east_' + estacion.upper()\n up_file = ruta + 'vert_' + estacion.upper()\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[1:]\n ew_datos=ew_archivo.readlines()[1:]\n up_datos=up_archivo.readlines()[1:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ns_x = ns_date\n ns_y = ns_data\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ew_x = ew_date\n ew_y = ew_data\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[6]\n up_x = up_date\n up_y = up_data\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n else:\n exit('[ERROR] Unrecognized format')\n\n ind = np.where(ns_x >= 2000)\n ns_x = ns_x[ind[0]]\n ns_y = ns_y[ind[0]]\n ind = np.where(ew_x >= 2000)\n ew_x = ew_x[ind[0]]\n ew_y = ew_y[ind[0]]\n ind = np.where(up_x >= 2000)\n up_x = up_x[ind[0]]\n up_y = up_y[ind[0]]\n\n plt.figure(num=None, figsize=(7, 13))\n plt.subplots_adjust(wspace=.05)\n plt.subplot(3,1,1)\n plt.grid()\n plt.plot(ns_x,ns_y,'ro',mec='green',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ns_ticks,ns_labels,rotation=30)\n 
plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'NS'))\n plt.subplot(3,1,2)\n plt.grid()\n plt.plot(ew_x,ew_y,'ro',mec='blue',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ew_ticks,ew_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'EW'))\n plt.subplot(3,1,3)\n plt.grid()\n plt.plot(up_x,up_y,'ro',mec='blue',mfc='green',mew=.5,ms=3.0,alpha=0.5)\n plt.xlabel('Years since %4.1f'% (up_date[0]))\n plt.ylabel('Milimeters')\n plt.xticks(up_ticks,up_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'UP'))\n plt.subplots_adjust(bottom=0.1, top=0.95, hspace=.43)\n# plt.savefig(estacion.upper()+'_'+formato+'.jpg',dpi=300)\n plt.show()", "def plot_1():\n p_files = []\n filename = \"energy_data_2D_80\"\n for file in sorted(os.listdir(folder)):\n if file.startswith(filename):\n p_files.append(os.path.join(folder,file))\n T_list = []\n fig, ax = plt.subplots()\n for p_file in p_files[3::3]:\n T = (os.path.splitext(os.path.basename(p_file))[0]).split('_',4)[4]\n #print(T)\n E = []\n t = []\n if (T not in T_list):\n T_list.append(T)\n with open(p_file) as csvfile:\n lines = csv.reader(csvfile, delimiter=' ')\n sweep = 0\n for row in lines:\n E.append(float(row[0]))\n t.append(sweep)\n sweep += 1\n ax.plot(t[0:200], E[0:200],label=\"T = \"+format(T[0:3]))\n ax.set_title(\"Energy per bond vs Time\")\n ax.set_ylabel(\"e / J\")\n ax.set_xlabel(\"t / sweeps\")\n ax.legend()\n\n fig.savefig(folder2+\"energy_vs_time.png\")\n fig.savefig(texfolder+\"energy_vs_time.pdf\")", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def draw_trajectory(filepath: str, timestamps: bool = False):\n\n t, x, y, z = coordinates.parse_coordinates_file(filepath=filepath)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plt.xlabel('X', fontsize=10, rotation = 0)\n plt.ylabel('Y', fontsize=10, rotation = 0)\n ax.set_zlabel('Z', fontsize=10, rotation = 0)\n\n # Add timestamps to plot\n if timestamps:\n for i in range(len(t)):\n timea = str(datetime.timedelta(seconds=t[i]))\n ax.annotate(timea, (x[i], y[i], z[i]),)\n\n ax.scatter(x, y, z, label='Траектория движения НКА')\n # ax.legend()\n\n plt.show()", "def plot_and_save_3d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (3d)'+'-'*24\n \n print 'Loading force data...', \n data = load_file(path_name+file_name)\n t = data['t']\n dyn = 1.0\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # x-moment\n plt.figure(1)\n plt.plot(t, dyn*data['dyn']['MX'], t, data['static']['MX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mx')\n plt.title('Moment (x)')\n plt.grid()\n plt.savefig('%sMx.png' %pic_path)\n\n # y-moment\n plt.figure(2)\n plt.plot(t, dyn*data['dyn']['MY'], t, data['static']['MY'])\n plt.legend([\"Dynamic\", \"Static\"])\n 
plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment (y)')\n plt.grid()\n plt.savefig('%sMy.png' %pic_path)\n\n # z-moment\n plt.figure(3)\n plt.plot(t, dyn*data['dyn']['MZ'], t, data['static']['MZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mz')\n plt.title('Moment (z)')\n plt.grid()\n plt.savefig('%sMz.png' %pic_path)\n \n # x-force\n plt.figure(4)\n plt.plot(t, dyn*data['dyn']['FX'], t, data['static']['FX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fx')\n plt.title('Fx')\n plt.grid()\n plt.savefig('%sFx.png' %pic_path)\n\n # y-force\n plt.figure(5)\n plt.plot(t, dyn*data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fy')\n plt.title('Fy')\n plt.grid()\n plt.savefig('%sFy.png' %pic_path)\n\n # z-force\n plt.figure(6)\n plt.plot(t, dyn*data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fz')\n plt.title('Fz')\n plt.grid()\n plt.savefig('%sFz.png' %pic_path)\n print 'done'\n\n #nice_looking_plots(t, data['dyn'], data['static'])\n\n if show:\n plt.show()", "def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb 
Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot_data_timeseries(X, output_file=None):\n\n n_features = X.shape[1]\n\n fig, ax = plt.subplots(nrows=n_features, figsize=(9, 4 * n_features),\n sharex=True, squeeze=False)\n\n for i in range(n_features):\n ax[i, 0].plot(X[:, i], '-')\n\n ax[i, 0].set_ylabel(r'$x_{:d}$'.format(i + 1))\n\n ax[i, 0].grid(ls='--', color='gray', alpha=0.5)\n\n if i == n_features - 1:\n ax[i, 0].set_xlabel('Time')\n\n if output_file is not None and output_file:\n plt.savefig(output_file, bbox_inches='tight')\n\n plt.show()", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False", "def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n 
time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()", "def loadAndPlot1DMassData(dataFile='movingPointMassData/testPointMassData000.pkl'):\n # Load the data back\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Iterate over the different saved trajectores and plot out the results.\n for i in range(len(dataOut[0])):\n plt.figure(i)\n plt.plot(dataOut[0][i][1],dataOut[0][i][0])\n plt.show()", "def example_data_file():\n\n header1 = \"#Sample Interval: 0.100000 (seconds)\"\n header2 = \"Timestamp,AccelX,AccelY,RateX,RateY\"\n header3 = \"dd-mmm-yyyy HH:MM:SS.FFF,mm/s2,mm/s2,rad/s,rad/s\"\n\n start_date = dt.datetime(2016, 3, 17, 1, 0, 0)\n\n # Add one tenth of a second\n time_delta = dt.timedelta(0, 0, 0, 100)\n\n # Sample frequency in Hz\n sample_freq = 10\n\n # 20 in event duration in seconds\n Ts = 60 * 20\n\n # Number of points\n N = Ts * sample_freq\n\n # Array of times\n time = [start_date + i * time_delta for i in range(N)]\n time_str = [t.strftime(\"%Y-%m-%d %H:%M:%S.%f\") for t in time]\n\n ax, ay, Rx, Ry = example_data(sample_freq, Ts)\n\n data = [\n \",\".join([time_str[i], str(ax[i]), str(ay[i]), str(Rx[i]), str(Ry[i])]) for i in range(N)\n ]\n\n data.insert(0, header3)\n data.insert(0, header2)\n data.insert(0, header1)\n\n return \"\\n\".join(data)", "def coordinate_vs_time_plotter(array, xyz_axis=0, bird=0, axis_of_time_steps=2, start=0., end=1.):\r\n y_values = array[bird, xyz_axis, :]\r\n x_values = get_time_array(array, axis_of_time_steps, start, end)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot()\r\n\r\n if xyz_axis == 0:\r\n ax.set_ylabel('X (m)')\r\n elif xyz_axis == 1:\r\n ax.set_ylabel('Y (m)')\r\n elif xyz_axis == 2:\r\n ax.set_ylabel('Z (m)')\r\n else:\r\n print(\"That is not a valid axis choice. 
Please choose one of: 0, 1, 2\")\r\n ax.set_xlabel('Time (s)')\r\n ax.scatter(x_values, y_values)\r\n return fig.show()", "def plot_data(self):", "def one_period_plot():\n file = \"Data/matfiles/20131221.mat\"\n object = MatReader(file)\n\n NeA = object.NeA\n latA = object.latA\n times = object.secondsA\n mlt = object.mltA\n ind1 = 2606 #lat inds\n ind2 = 13940 #lat inds\n \n ind1 = 3197 #mlat inds\n ind2 = 14390 #mlat inds\n \n T = ind2 - ind1\n ind1 += int(T/2)\n ind2 += int(T/2)\n\n latA = latA[ind1:ind2]\n NeA = NeA[ind1:ind2]\n # NeA = object.meanie(NeA, 5)\n times = times[ind1:ind2]\n mlt = mlt[ind1:ind2]\n mlt = hour_round(mlt)\n\n lats = np.zeros_like(latA)\n lats[0] = latA[0]\n for i in range(len(latA)-1):\n dlat = latA[i+1] - latA[i]\n if dlat < 0:\n lats[i+1] = lats[i] - dlat\n else:\n lats[i+1] = lats[i] + dlat\n\n lats += 90\n\n xticks = np.array([-90, -70, -30, 30, 70, 110, 150, 210, 250, 270]) + 90\n gridticks = np.array([-90, -70, -30, 30, 70, 77, 103, 110, 150, 210, 250, 270]) + 90\n # plt.plot(lats, NeA, \".\", markersize = 1)\n # plt.plot([0, 0], [0, np.max(NeA)], \"k\")\n # plt.plot([30, 30], [0, np.max(NeA)], \"k\")\n # plt.plot([60, 60], [0, np.max(NeA)], \"k\")\n # plt.plot([120, 120],[0, np.max(NeA)], \"k\")\n # plt.plot([150, 150], [0, np.max(NeA)], \"k\")\n # plt.plot([167, 167], [0, np.max(NeA)], \"k\")\n # plt.plot([193, 193], [0, np.max(NeA)], \"k\")\n # plt.plot([210, 210], [0, np.max(NeA)], \"k\")\n # plt.plot([240, 244], [0, np.max(NeA)], \"k\")\n # plt.plot([300, 300], [0, np.max(NeA)], \"k\")\n # plt.plot([330, 330], [0, np.max(NeA)], \"k\")\n # plt.plot([360, 360], [0, np.max(NeA)], \"k\")\n # plt.xticks(xticks)\n # plt.xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n # plt.ylabel(\"Electron density [cm$^{-1}$]\")\n # plt.title(\"One SWARM satellite period\")\n # plt.grid(\"on\", axis = \"x\", xdata = gridticks)\n #adding letters\n x = (gridticks[:-1] + gridticks[1:])/2 - 3\n y = np.zeros_like(x) - np.max(NeA)/40\n s = [\"S\", \"B\", \"A\", \"B\", \"C\", \"D\", \"C\", \"B\", \"A\", \"B\", \"S\"]\n # for i in range(len(x)):\n # plt.text(x[i], y[i], s[i], fontsize = 10)\n # plt.savefig(\"Figures/swarm_period.pdf\")\n # plt.show()\n\n # plt.plot(times, latA)\n # plt.plot(times, mlt)\n # plt.show()\n print(lats[0])\n print(lats[-1])\n \n fig, ax = plt.subplots()\n ax.plot(lats, NeA, \".\", markersize = 1)\n ax.set_xticks(xticks, minor=False)\n ax.set_xticks([167, 193], minor=True)\n ax.xaxis.grid(True, which = \"major\")\n ax.xaxis.grid(True, which = \"minor\")\n for i in range(len(x)):\n ax.text(x[i], y[i], s[i], fontsize = 10)\n ax.set_xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n ax.set_ylabel(\"Electron density [cm$^{-1}$]\")\n ax.set_title(\"One Swarm satellite period\")\n # plt.savefig(\"Figures/swarm_period.pdf\")\n plt.show()\n plt.plot(mlt, NeA)\n plt.show()\n plt.plot(mlt, lats)\n plt.show()", "def loadtcdat(filename= None):\n\n import numpy as np\n from StringIO import StringIO\n import Tkinter\n from tkFileDialog import askopenfilename\n from matplotlib.pyplot import figure,subplot,plot,xlabel,ylabel,title,legend\n\n if filename is not None:\n print \"Opening %s\\n\" %(filename)\n else:\n root = Tkinter.Tk()\n root.withdraw()\n filename = askopenfilename(parent=root, title='Open File',\n filetypes=[('csv files', '*.csv'),\n ('txt files', '*.txt')])\n root.destroy()\n root.mainloop()\n\n if filename is not None:\n f=open(filename)\n names = 
f.readline()\n names = names.strip('\\r\\n')\n names = names.split(\",\")\n f.close()\n\n data = np.genfromtxt(filename, delimiter=',',\n unpack=True, skip_header=2)\n time = data[0]\n\n figure()\n subplot(211)\n plot(time, data[1], label='Feed bin')\n plot(time, data[2], label='Part bin')\n ylabel(r'$ T_{bin} \\left(K\\right) $')\n legend(loc='best')\n\n subplot(212)\n plot(time,data[4],label='Feed bin heater')\n plot(time,data[5],label='Part bin heater')\n xlabel(r'$ Time \\left(s\\right) $')\n ylabel(r'$ P_{heater} \\left( \\frac{W}{m^2} \\right) $')\n legend(loc='best')\n\n return (data, time, names)", "def load_times(file_name):\n data = np.loadtxt(file_name)\n data = data[data[:, 0].argsort()]\n times = data[:, 0]\n values = data[:, 1]\n\n # Remove the mean amplitude and shift time origin\n times -= times[0]\n values -= np.mean(values)\n\n return times, values", "def _figure_2():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n cdata = data[:, 0]\n seconds = np.arange(data.shape[0]) * 1. / 250\n\n plt.xlim(right=seconds[-1])\n plt.plot(seconds, cdata, color='black', linestyle=':')\n plt.ticklabel_format(useOffset=False)\n plt.xlabel('Second')\n plt.ylabel('Microstrain')\n plt.savefig('Figure2.png', dpi=300)\n plt.gcf().clear()", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], self.observed_data[k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def gentoplot(time):\n \n toplot = {}\n\n # Generates a list of movie paths in the data folder.\n files = dftf.batch_s('.') \n\n # Generates dft traces and plots for each roi in each movie.\n for file in files:\n os.chdir(file)\n print(os.path.basename(file))\n\n for col in COLS:\n \n if os.path.exists('params') == True:\n rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE, \n corrparamsfile=CORRPARAMS_FILE, colname=col)\n td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)\n moviename = os.path.basename(os.path.abspath('.'))\n \n # Selects the area of the raw trace to plot.\n frames = time * td['fps']\n #print(frames)\n plottime = td['seltrace'][:frames]/10\n #print(len(plottime))\n ms = plottime-np.mean(plottime)\n xsec = np.linspace(0, len(plottime)/td['fps'], len(plottime))\n #print(xsec)\n condition = td['condition']\n toplot[moviename] = [xsec, ms, condition]\n print(np.max(ms), np.min(ms))\n \n return(toplot)", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n 
s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def plot_timeDB(timeDB, xunits='yr', yunits='MPa', skip=8, P0=33.0):\n time, pressure = np.loadtxt(timeDB, skiprows=skip, unpack=True)\n pressure = pressure * P0\n\n #if xunits == 'yr':\n # time = time / 31536000.0\n #elif xunits == 'day':\n # time = time / 86400.0\n\n plt.figure()\n plt.plot(time,pressure,'b.-',lw=3,label='pressure')\n plt.xlabel('Time [{}]'.format(xunits))\n plt.ylabel('Pressure [{}]'.format(yunits))\n plt.title('Time History')\n plt.show()", "def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + 
str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def line(self,file=None,file2=None,data=None,xmin=None,xmax=None,ymin=None,ymax=None,col=(0,1),col2=(0,1),xtitle='xtitle',ytitle='ytitle',title=' ',save=None,linewidth=2,label=None,label2=None,legendloc=None,sizex=6,sizey=4):\n import matplotlib.pyplot as plt\n import numpy as np\n\n self.version()\n fig = plt.figure(figsize=(sizex, sizey))\n ax = fig.add_subplot(111)\n if xmin is not None:\n axes = plt.gca()\n axes.set_xlim([xmin, xmax])\n axes.set_ylim([ymin, ymax])\n\n if file is not None:\n data = np.loadtxt(file,usecols=col, unpack=True) #Read columns\n count = 0\n np.delete(data[1],0)\n np.delete(data[0],0)\n for coord in data[0]:\n data[0][count] = coord - 90\n if coord <= 90:\n data[0][count] += 360\n count = count + 1\n\n\n\n #print(coord)\n for i in col2:\n if i == 0:\n continue\n plt.plot(data[0], data[i], linewidth=linewidth,label = label)\n\n\n if file2 is not None:\n data2 = np.loadtxt(file2,usecols=col2, unpack=True)\n count = 0\n\n data2[1][0] = data2[1][-1]\n data2[1][1] = data2[1][-2]\n #print(len(data[0]),len(data2))\n for coord in data2[0]:\n print(data2[1][count])\n data2[0][count] = coord - 90\n if coord <= 90 and coord >=0:\n data2[0][count] += 360\n\n #if coord >=89 and coord <=91:\n # print(data2[1][count])\n #if 
data2[0][count]>=250 and data2[0][count] <=300:\n # data2[1][count] = 0\n count = count + 1\n\n for i in col2:\n if i == 0:\n continue\n plt.plot(data2[0],data2[i],'--',linewidth=linewidth,label = label2)\n\n\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n ax.legend(loc=legendloc)\n \"\"\"Loc:\n best -- 0\n upper right -- 1\n upper left -- 2\n lower left -- 3\n lower right -- 4\n right -- 5\n center left -- 6\n center right -- 7\n lower center -- 8\n upper center -- 9\n center -- 10\n \"\"\"\n self.save(fig, save)\n\n return None" ]
[ "0.65714025", "0.64165264", "0.64107704", "0.63405186", "0.631422", "0.6297773", "0.6248544", "0.615319", "0.60817546", "0.607438", "0.60634214", "0.60446906", "0.60240567", "0.601342", "0.6000441", "0.5957452", "0.5925133", "0.59175897", "0.5905955", "0.5905455", "0.5882972", "0.587616", "0.58669406", "0.5836873", "0.5815084", "0.58084685", "0.580844", "0.58037347", "0.57826895", "0.5773784" ]
0.6902801
0
Push the item in the front of the deque
def enqueue_front(self, item):
    self._items.insert(0, item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, item):\n super().add_item_to_front(item)", "def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\"Case 1\")\n elif(self.front_ == -1 and self.size_ ==0) :#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_ = 0\n self.data_[self.front_]= e #Inserting First element in deque either front end or rear end they both lead to the same result.\n self.size_+=1\n # print(\"Case 2\")\n elif (self.front_ ==0):#If the front is at the beginning of the Deque.This may happen after the first insertion.\n self.front_-=1\n self.data_[self.front_] = e\n self.size_+=1\n # print(\"Case 3\")\n else:\n self.front_ -=1 #We add normally \n self.data_[self.front_] = e\n self.size_+=1\n #print(\"Case 4\")", "def add_front(self, item):\n\n self.items.insert(0, item)", "def push_front(self, e):\n # initialize new Node with data e\n newNode = Node(e)\n # if the deque is empty\n if self.size == 0:\n # set the front and back to the new node\n self.front = self.back = newNode\n # if deque is not empty\n else:\n # previous front node is the prior to the new front Node\n newNode.prior = self.front\n # previous front node's next node is new node\n self.front.next = newNode\n # front node is the new node\n self.front = newNode\n # increment deque size\n self.size += 1", "def push(self, item):\n self.list.prepend(item)", "def insertFront(self, item):\n self.sentinel.insertAfter(item)\n self.N += 1", "def push(self, item):\n self.linked_list.prepend(item)", "def append_front(self, item):\n\n self.front = Node(item, self.front)", "def push(self, val):\r\n return self.deque.append(val)", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def add_first(self, data):\n self.deque.insert(0, data)", "def enqueue(self, item):\n if self.rear == None:\n self.front = Node(item)\n self.rear = self.front\n else:\n self.rear.next = Node(item)\n self.rear = self.rear.next", "def push_front(self, val):\n new_node = Node(val, self.head)\n if self.is_empty():\n self.tail = new_node\n self.head = new_node\n self.size += 1", "def push_front(self, item):\n new_node = Node(item)\n # if the list is empty, make it head\n if self.head is None:\n self.head = new_node\n # else, \n else:\n new_node.next = self.head # new node points to current head\n self.head = new_node # current head points to new_node\n self.n += 1", "def enqueue(self, item):\n self.__queue.insert(0, item)", "def push_back(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_back()\n self.back_+=1\n self.data_[self.back_]= e\n self.size_+=1\n #print(\"case 1\")\n elif (self.front_ == -1 and self.size_==0):#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_=0\n self.data_[self.back_]= e\n self.size_+=1\n else:#The Back is not at the first index(possibly somewhere in between) and if we push back it we have to go up by one to move to the new back\n self.back_+=1\n self.data_[self.back_] =e \n self.size_+=1", "def push(self, item):\r\n self.stack.insert(0, item)", "def push_front(self, val: Generic[T]) -> None:\n 
first_node = self.node.next\n\n self.node.next = Node(val)\n latest_first = self.node.next\n\n latest_first.prev = self.node #pushes the node to the front\n latest_first.next = first_node\n first_node.prev = latest_first #rearranges the list", "def left_enqueue(self, item):\n item_to_add = Node(item)\n item_to_add.set_next(self.head)\n\n # if the deque is empty, the new item is the tail\n if not self.tail:\n self.tail = item_to_add\n else:\n # connect the old head to the new head\n self.head.set_prev(item_to_add)\n\n # set the new node as the head\n self.head = item_to_add\n self.size += 1", "def push(self, x):\n self.values.append(x)\n if len(self.values) == 1:\n self.front = x", "def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1", "def push_front(self, value):\n new_node = self.Node(value)\n\n # Edge Case : List is empty\n if self._size == 0:\n self._tail = new_node\n self._head = new_node\n self._size += 1\n return\n\n new_node.next = self._head\n self._head.prev = new_node\n self._head = new_node\n self._size += 1", "def addFront(self, item, clock):\n temp = Node2Way(item, clock)\n temp.setPrevious(self._front)\n \n if self._size == 0:\n self._rear = temp\n else:\n self._front.setNext(temp)\n \n self._front = temp\n self._size += 1", "def enqueue(self, item):\n while len(self._stack1) > 0:\n self._stack2.push(self._stack1.pop())\n self._stack2.push(item)", "def enqueue(self, item):\n old_last = self.last\n self.last = self.Node(item)\n\n if self.is_empty():\n self.first = self.last\n else:\n old_last.next_node = self.last\n\n self.N += 1", "def push_front(self, param):\n if self.size == self.capacity:\n self.resize(2 * self.size)\n for _ in range(self.arr):\n pass", "def push(self, item):\n if len(self._data) == self.MAX_SIZE:\n # full we have to pop the oldest item (head)\n self._data.pop(0)\n self._data.append(item)" ]
[ "0.7934391", "0.79339534", "0.7679568", "0.74891657", "0.74482065", "0.7417094", "0.7404675", "0.7363072", "0.7294415", "0.72747624", "0.7242487", "0.7242487", "0.72093624", "0.71174246", "0.707182", "0.7052566", "0.70389926", "0.70353955", "0.701858", "0.70012", "0.6997899", "0.69728386", "0.6923687", "0.6915132", "0.69096", "0.68918145", "0.6890495", "0.68384916", "0.68355286", "0.6809102" ]
0.8026828
0
Pop the item in the front of the deque. Raise IndexError if the deque is empty.
def dequeue_front(self):
    try:
        return self._items.pop(0)
    except:
        raise IndexError('The deque is empty')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop_front(self):\n # set temp to deque's front for return\n temp = self.front\n # if deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if the deque has more than one element\n else:\n # set front to front's prior node, set that node's next to\n # none, and decrement deque's size by 1\n self.front = self.front.prior\n self.front.next = None\n self.size -= 1\n # return previous front node's data\n return temp.data", "def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem", "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def dequeue_rear(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The deque is empty')", "def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! ')", "def pop_back(self):\n # set temp to back node of deque\n temp = self.back\n # if the deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if deque has more than one element\n else:\n # set deque's back to previous back's next, set the new\n # back's prior to None, and decrement deque size\n self.back = self.back.next\n self.back.prior = None\n self.size -= 1\n # return previous back node's data\n return temp.data", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def peek_back(self):\n if ((self.is_empty()) or self.data_[self.back_]== None):#If we trip this if block we raise an error since we know the deque should be empty \n raise IndexError\n return self.data_[self.back_]", "def removeFront(self):\n if self._size == 0:\n raise AttributeError(\"Cannot removeFront from an empty Deque\")\n \n temp = self._front\n self._front = self._front.getPrevious()\n if self._size == 1:\n # removing only item which is the rear as well as the front item\n self._rear = None\n else:\n self._front.setNext(None)\n self._size -= 1\n \n return temp.getData()", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def dequeue(self):\n if not self.front:\n raise AttributeError(\"Can't dequeue from an empty queue\")\n\n removed = self.front\n self.front = self.front.next\n return removed.value\n # try:\n # removed = self.front\n # self.front = self.front.next\n # return removed.value\n # except AttributeError:\n # return \"Can't dequeue from an empty queue\"", "def dequeue(self):\n if self.is_empty():\n raise ValueError('Queue 
underflow')\n\n item = self.first.item\n self.first = self.first.next_node\n self.N -= 1\n\n if self.is_empty():\n self.last = None # To avoid loitering\n\n return item", "def dequeue(self):\n try:\n temp = self.front\n self.front = self.front.next\n temp.next = None\n return temp.value\n except Exception:\n return \"the queue is empty\"", "def dequeue(self):\n try:\n return self._container.pop()\n except IndexError:\n raise IndexError(\"Cannot dequeue from empty queue.\")", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue is empty\")\n answer = self._data[self._front]\n self._data[self._front]\n self._data = (self._front+1)%len(self._data)\n self._size-=1\n return answer", "def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item", "def peek_front(self):\n if ((self.is_empty()) or self.data_[self.front_]== None): #If we trip this if block we raise an error since we know the deque should be empty \n raise IndexError\n return self.data_[self.front_]", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def dequeue(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop_from_deque(self):", "def pop(self):\n return super().remove_item_from_front()", "def dequeue(self) -> object:\r\n if self.is_empty():\r\n raise QueueException\r\n value = self.da.get_at_index(0)\r\n self.da.remove_at_index(0)\r\n return value", "def pop(self):\n try:\n if self.size() > 0:\n top = self.top()\n self.items.pop()\n return top\n else:\n raise IndexError('Cannot pop item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def dequeue(self):\n if len(self) == 1:\n self.tail = None\n return self.pop()", "def peek_back(self):\n # if the deque is empty\n if self.is_empty():\n # raise an IndexError\n raise IndexError()\n # if deque is not empty, return back's data\n return self.back.data", "def pop(self, pos=None):\n \n if self.is_empty():\n raise IndexError('pop from empty list')\n \n if pos is None:\n pos = self.length() - 1\n \n elif pos >= self.length():\n raise IndexError('pop index out of range')\n \n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n # If the item to be removed is the first item\n if pos == 0:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n \n return current.get_data()", "def pop(self):\n\n if not self.empty:\n i = self._begin\n\n self._begin = (self._begin + 1) % self._capacity\n self._size -= 1\n\n return (self[i])\n else:\n raise ValueError", "def pop(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Pop at the beginning of the list.\n elif index == 0:\n item = self._first\n # modify self._first\n self._first = self._rest._first\n self._rest = 
self._rest._rest\n return item\n # Recursive case\n else:\n if not self._rest:\n raise IndexError\n return self._rest.pop(index - 1)" ]
[ "0.7918459", "0.7688422", "0.7626735", "0.76226133", "0.7617293", "0.7505443", "0.7500941", "0.7439415", "0.740664", "0.7395839", "0.73582757", "0.73489094", "0.7289233", "0.7263663", "0.7241764", "0.724081", "0.7221747", "0.71959555", "0.7189014", "0.7156972", "0.7149371", "0.71443814", "0.708917", "0.7076477", "0.7070038", "0.70555925", "0.7050435", "0.70376104", "0.7025924", "0.70223874" ]
0.81932133
0
Pop the item at the end of the deque. Raise IndexError if the deque is empty.
def dequeue_rear(self): try: return self._items.pop() except: raise IndexError('The deque is empty')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem", "def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! ')", "def dequeue_front(self):\n try:\n return self._items.pop(0)\n except:\n raise IndexError('The deque is empty')", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def dequeue(self):\n try:\n return self._container.pop()\n except IndexError:\n raise IndexError(\"Cannot dequeue from empty queue.\")", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def pop(self):\n\n if not self.empty:\n i = self._begin\n\n self._begin = (self._begin + 1) % self._capacity\n self._size -= 1\n\n return (self[i])\n else:\n raise ValueError", "def dequeue(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n\n item = self.first.item\n self.first = self.first.next_node\n self.N -= 1\n\n if self.is_empty():\n self.last = None # To avoid loitering\n\n return item", "def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise IndexError('Buffer does not have {0} elements'.format(index))", "def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item", "def pop(self):\n try:\n item = self._items.pop()\n # This operation decrements the number of items\n # in the stack, we need to update the count variable\n self._update_count()\n return item\n except IndexError:\n raise IndexError(\"Stack is empty\")", "def pop_back(self):\n # set temp to back node of deque\n temp = self.back\n # if the deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if deque has more than one element\n else:\n # set deque's back to previous back's next, set the new\n # back's prior to None, and decrement deque size\n self.back = self.back.next\n self.back.prior = None\n self.size -= 1\n # return previous back node's data\n return temp.data", "def pop(self, pos=None):\n \n if self.is_empty():\n raise IndexError('pop from empty list')\n \n if pos is None:\n pos = self.length() - 1\n \n elif pos >= self.length():\n raise IndexError('pop index out of range')\n \n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n # If the item to be removed is the first item\n if pos == 0:\n self.head = current.get_next()\n 
else:\n previous.set_next(current.get_next())\n \n return current.get_data()", "def pop(self) -> int:\n return self._deque.pop(0)", "def pop_from_deque(self):", "def pop(self, index):\r\n if index < 0 or index >= self.size():\r\n raise IndexError(\"Array index out of bounds\")\r\n itemToReturn = self._items[index]\r\n # Shift items up by one position\r\n for i in range(index, self.size() - 1):\r\n self._items[i] = self._items[i + 1]\r\n # Reset empty slot to fill value\r\n self._items[self.size() - 1] = self._fillValue\r\n self._logicalSize -= 1\r\n if self.size() <= len(self) // 4 and len(self) > self._capacity:\r\n self.shrink()\r\n return itemToReturn", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def pop(self):\n try:\n if self.size() > 0:\n top = self.top()\n self.items.pop()\n return top\n else:\n raise IndexError('Cannot pop item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def pop(self):\n try:\n return self.array.pop()\n except IndexError as e:\n return None", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue is empty\")\n answer = self._data[self._front]\n self._data[self._front]\n self._data = (self._front+1)%len(self._data)\n self._size-=1\n return answer", "def dequeue(self) -> object:\r\n if self.is_empty():\r\n raise QueueException\r\n value = self.da.get_at_index(0)\r\n self.da.remove_at_index(0)\r\n return value", "def pop(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The stack is empty.')", "def pop_last(self):\n self.pop_item(-1)", "def pop(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Pop at the beginning of the list.\n elif index == 0:\n item = self._first\n # modify self._first\n self._first = self._rest._first\n self._rest = self._rest._rest\n return item\n # Recursive case\n else:\n if not self._rest:\n raise IndexError\n return self._rest.pop(index - 1)", "def pop(self):\n if self.is_empty():\n raise Exception(\"Stack is empty.\")\n\n self.size -= 1\n return self.arr.pop()", "def dequeue(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop(self):\n size = self._list.size()\n if size == 0:\n return None\n data = self._list.tail.data\n self._list.removeIndex(size-1)\n return data" ]
[ "0.77415574", "0.76645964", "0.76196545", "0.7619073", "0.74518174", "0.73892117", "0.7347819", "0.73183465", "0.72986156", "0.7289281", "0.7255884", "0.7225652", "0.721948", "0.721712", "0.72019696", "0.71845245", "0.7174988", "0.7146664", "0.7139901", "0.71300745", "0.71008176", "0.7079559", "0.70788455", "0.7059101", "0.70460546", "0.703343", "0.70224077", "0.7019088", "0.7013425", "0.7006933" ]
0.7715742
1
Returns an array of full paths for a relative path with globs
def expand_path(__file__, path_with_globs): return glob.glob(relative_path(__file__, path_with_globs))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def glob(path: str) -> list[str]:\n fs, relative_path = url_to_fs(path)\n return cast(list[str], fs.glob(relative_path))", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def expand_paths(__file__, paths_with_globs):\n if isinstance(paths_with_globs, str):\n return expand_path(__file__, paths_with_globs)\n else:\n expanded_globs = [\n expand_path(__file__, path) for path in paths_with_globs\n ]\n # Flatten\n return list(itertools.chain.from_iterable(expanded_globs))", "def recursive_glob(path):\n if \"*\" not in path:\n # Glob isn't needed.\n return [path]\n elif \"**\" not in path:\n # Recursive glob isn't needed.\n return path_utils.glob(path)\n else:\n return path_utils.glob(path, recursive=True)", "def abspath(files):\n\n files = sum([glob.glob(x) for x in files], [])\n return [os.path.abspath(x) for x in files]", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n return found_files", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def expand_globpaths(globpaths, cwd=None):\n with cd(cwd):\n paths = sum((recursive_glob(p) for p in globpaths), [])\n return expand_paths(paths, cwd)", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def get_all_img_paths(path_to_folder):\n all_subfolders = glob.glob(path_to_folder + '*')\n all_paths = []\n for folder in all_subfolders:\n all_paths.extend(glob.glob(folder + '/*'))\n # get relative paths\n common_prefix = path_to_folder\n relative_paths = [os.path.relpath(path, common_prefix) for path in all_paths]\n return relative_paths", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def get_files(pattern):\n\n files = [realpath(p) for p in glob2.glob(pattern)]\n return list(set(files))", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def resolve_file_paths(local_path):\n local_path = os.path.abspath(local_path)\n files = []\n if local_path.find('*') > -1:\n # Supplied path is a pattern - relative directory will be the\n # path up to the first wildcard\n ref_dir_str = local_path.split('*')[0].rstrip('/\\\\')\n if not os.path.isdir(ref_dir_str):\n ref_dir_str = os.path.dirname(ref_dir_str)\n ref_dir = pathlib.Path(ref_dir_str)\n pattern = local_path[len(ref_dir_str + os.pathsep):]\n files = [str(f) for f in ref_dir.glob(pattern) if f.is_file()]\n local_path = 
ref_dir_str\n else:\n if os.path.isdir(local_path):\n # Supplied path is a directory\n files = [os.path.join(local_path, f) for f in os.listdir(local_path)\n if os.path.isfile(os.path.join(local_path, f))]\n elif os.path.isfile(local_path):\n # Supplied path is a file\n files.append(local_path)\n local_path = os.path.dirname(local_path)\n return local_path, files", "def glob(self, pathname, with_matches=False):\r\n return list(self.iglob(pathname, with_matches))", "def glob(glob_pattern: str, directoryname: str) -> List[str]:\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n return matches", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs(\n (*self.file_includes, *self.dir_includes, *self.ignores), glob_match_error_behavior\n )", "def globs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n def relative_glob(globspec):\r\n for fn in glob.glob(os.path.join(root, globspec)):\r\n yield os.path.relpath(fn, root)\r\n def combine(files, globspec):\r\n return files ^ set(relative_glob(globspec))\r\n return cls(lambda: reduce(combine, globspecs, set()))", "def expand(self, path_list):\n path_list2 = []\n for path in path_list:\n if glob.has_magic(path):\n iterator = glob.iglob(path)\n path_list2.extend(iterator)\n else:\n path_list2.append(path)\n return path_list2", "def glob(self):\n self._deduplicate()\n result = []\n for entry in self._entries:\n pp = entry.posix_path()\n if GLOBBABLE_REGEX.search(pp):\n try:\n globs = glob.glob(entry.posix_path())\n result += globs\n except re.error:\n result.append(pp)\n else:\n result.append(pp)\n self._entries = [Path(g) for g in result]\n self._clean = False\n self._current = 0", "def expand_paths(paths, cwd=None):\n return [expand_path(x, cwd) for x in paths]", "def glob(path):\n path = os.path.abspath(path)\n if os.path.isdir(path):\n files = [d for d in [\n os.path.join(path, f) for f in os.listdir(path)\n ] if os.path.isfile(d)]\n else:\n files = glob.glob(path)\n print(\"Found {0} files\".format(len(files)))\n return files", "def expand_paths(self, paths):\n \n expanded_paths = []\n if isinstance(paths, str): # A single path\n expanded = glob.glob(paths)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n elif isinstance(paths, list): # Multiple path\n for p in paths:\n expanded = glob.glob(p)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n else:\n _LOG.exception(\"Unknown input for the 'add' function.\")\n return expanded_paths", "def files(pathspec):\n\treturn [f for f in glob.glob(pathspec)]", "def getFilesMulti(paths, pat):\n filelist = []\n for d in paths:\n filelist += glob.glob( os.path.join(d,pat) )\n filelist = [ f.replace(os.path.sep,'/') for f in filelist]\n return filelist", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []" ]
[ "0.78830874", "0.74837524", "0.73281705", "0.7273039", "0.7240019", "0.7209716", "0.7107599", "0.7090277", "0.70357305", "0.69723034", "0.69063663", "0.68546826", "0.68303967", "0.6820884", "0.68082666", "0.6802939", "0.6779365", "0.67275643", "0.6712124", "0.67007935", "0.666919", "0.6668771", "0.6655419", "0.66498345", "0.66493094", "0.6614659", "0.6612365", "0.66040355", "0.65995", "0.65812266" ]
0.7740084
1
One solution would be to do an inorder traversal and sum the values along the way (or just recursively sum over the tree). => O(N), but when the range [lo, hi] is small this is wasteful.
def rangeSumBST(self, root: TreeNode, lo: int, hi: int) -> int: def visit(node: TreeNode) -> int: if not node: return 0 if node.val < lo: return visit(node.right) elif hi < node.val: return visit(node.left) else: return node.val + visit(node.left) + visit(node.right) return visit(root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def reduce(self, start: int = 0, end: Optional[int] = None) -> Any:\n if end is None:\n end = self.capacity\n elif end < 0:\n end += self.capacity\n\n # Init result with neutral element.\n result = self.neutral_element\n # Map start/end to our actual index space (second half of array).\n start += self.capacity\n end += self.capacity\n\n # Example:\n # internal-array (first half=sums, second half=actual values):\n # 0 1 2 3 | 4 5 6 7\n # - 6 1 5 | 1 0 2 3\n\n # tree.sum(0, 3) = 3\n # internally: start=4, end=7 -> sum values 1 0 2 = 3.\n\n # Iterate over tree starting in the actual-values (second half)\n # section.\n # 1) start=4 is even -> do nothing.\n # 2) end=7 is odd -> end-- -> end=6 -> add value to result: result=2\n # 3) int-divide start and end by 2: start=2, end=3\n # 4) start still smaller end -> iterate once more.\n # 5) start=2 is even -> do nothing.\n # 6) end=3 is odd -> end-- -> end=2 -> add value to result: result=1\n # NOTE: This adds the sum of indices 4 and 5 to the result.\n\n # Iterate as long as start != end.\n while start < end:\n\n # If start is odd: Add its value to result and move start to\n # next even value.\n if start & 1:\n result = self.operation(result, self.value[start])\n start += 1\n\n # If end is odd: Move end to previous even value, then add its\n # value to result. NOTE: This takes care of excluding `end` in any\n # situation.\n if end & 1:\n end -= 1\n result = self.operation(result, self.value[end])\n\n # Divide both start and end by 2 to make them \"jump\" into the\n # next upper level reduce-index space.\n start //= 2\n end //= 2\n\n # Then repeat till start == end.\n\n return result", "def sumRangeTree2(self, i, j, cur):\n if i > j:\n return 0\n start, end = cur.start, cur.end\n if i == start and j == end:\n return cur.val\n mid = start+(end-start)/2\n return self.sumRangeTree(i, min(j, mid), cur.left) + self.sumRangeTree(max(mid+1, i), j, cur.right)", "def recursiveSums(desiredNum, values, depth=0, max_depth=5):\n depth+=1\n if(depth>max_depth):\n return\n if(len(values)==1):\n if(values[0]==desiredNum):\n return values[0]\n else:\n arr = []\n removals = []\n for i, value in enumerate(values):\n thisDesiredNum = desiredNum-value\n if(thisDesiredNum==0):\n arr.append(value)\n elif(thisDesiredNum>0):\n #quick fix prevents double counting here\n newValues = [l for l in values if(l not in removals)]\n newValues.pop(newValues.index(value))\n arr.append([value])\n if(len(newValues)!=0 and sum(newValues)>=thisDesiredNum):\n newSums = recursiveSums(thisDesiredNum, newValues, depth, max_depth)\n if(newSums):\n if(isinstance(newSums, int)):\n arr.append([newSums])\n else:\n arr[-1].extend(newSums)\n if(len(arr[-1])==0 or arr[-1]==[value]):\n arr.pop()\n removals.append(value)\n #remove unusable values\n iteratedValues = [value for value in values if(value not in removals)]\n if(iteratedValues):\n arr.append(recursiveSums(desiredNum, iteratedValues, depth, max_depth))\n return arr", "def value(d,o):\n # return memoized value if possible\n if (d,o) in v:\n return v[(d,o)]\n\n thisitem = int(t[d][o])\n # the total of a subtree that starts at the leaf, is just the value of the leaf\n if d == maxdepth:\n val = thisitem\n else:\n val = 
thisitem + max(value(d+1,o),value(d+1,o+1))\n\n v[(d,o)]=val\n return val", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def summationRecursion(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return lower + summationRecursion(lower + 1, upper)", "def find_sum(root, desired_sum, level=0, buffer_list=None, result=[]):\n if not buffer_list:\n buffer_list = []\n\n if not root:\n return result\n\n buffer_list.append(root.key)\n temp = desired_sum\n\n for i in range(level, -1, -1):\n temp -= buffer_list[i]\n\n if temp == 0:\n result.append(buffer_list[i:level + 1])\n\n find_sum(root.left, desired_sum, level + 1, buffer_list[:], result)\n find_sum(root.right, desired_sum, level + 1, buffer_list[:], result)\n\n return result", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def getSum2(root, level=0, maxLevel=None, sum=None):\n if root == None:\n return 0\n \n if maxLevel == None:\n maxLevel = [-1]\n sum = [0]\n \n if maxLevel[0] < level:\n sum[0] += root.data\n maxLevel[0] = level\n \n getSum2(root.right, level+1, maxLevel, sum) \n getSum2(root.left , level+1, maxLevel, sum)\n\n if level == 0:\n return sum[0]", "def sum_values(values):\n return (sum(values))", "def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])", "def binary_sums(start, limit):\n for n in range(start, limit):\n for i in range(1, n/2 + 1):\n yield i, n - i", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def sum(self) -> int:\n return self.root.sum", "def sumTo(n):\n\n sum_all = (n * (n+1))/2\n\n return sum_all", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def total(h):\r\n\treturn sum(i.points() for i in h)", "def sum_node_depths(node, current_sum, level):\n # Base case\n if node is None:\n return current_sum\n\n current_sum += level\n current_sum = sum_node_depths(node.left, current_sum, level + 1)\n current_sum = sum_node_depths(node.right, current_sum, level + 1)\n\n return current_sum", "def segment_sum(self, left, right):\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater", "def sum_elements(arr):\n return sum(arr)", "def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = 
node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans", "def sum(n):\n if n == 0:\n return 0\n return sum(n - 1) + n" ]
[ "0.6729964", "0.66551703", "0.64795923", "0.6404012", "0.63082033", "0.6260686", "0.6228508", "0.6228508", "0.6228508", "0.6228508", "0.6228508", "0.61943024", "0.6178861", "0.61783046", "0.61113393", "0.6107871", "0.6049075", "0.604374", "0.6043456", "0.603119", "0.5992261", "0.5984472", "0.5962361", "0.5950429", "0.59476715", "0.59434265", "0.592258", "0.5909091", "0.59060985", "0.59019375" ]
0.7450429
0
Loops over arrays in the arrays_iterator and evaluates the cut_function at the cut_values. Returns a list of efficiencies, passed events/objects, and total events/objects. cut_function is expected to return a tuple (n_pass, n_total) given input (arrays, cut_value).
def get_eff(arrays_iterator, cut_function, cut_values): n_cuts = len(cut_values) n_total = np.zeros(n_cuts) n_pass = np.zeros(n_cuts) for arrays, dataset in arrays_iterator: weight = dataset.get_weight() for i_cut, cut in enumerate(cut_values): this_n_pass, this_n_total = cut_function(arrays, cut) n_total[i_cut] += weight * this_n_total n_pass[i_cut] += weight * this_n_pass # Basically n_pass / n_total, but returns 0 if n_total has a 0 somewhere eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0) return eff, n_pass, n_total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass", "def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items", "def cut_eval(self, hits, *args):\n end = self.start_offset + self.train_window + self.predict_window\n return self.cut(hits, self.start_offset, end) + args", "def runCutVals(df, eVal=0., windowSize = 2):\n\n dfg = df.groupby(['cpd1'])\n\n eMin = round(eVal - windowSize/2, 2)\n eMax = round(eMin + windowSize, 2)\n dFullPeakE, dFullBkgE = 0, 0\n dCutPeakE, dCutBkgE = 0, 0\n dFullPeakN, dFullBkgN = 0, 0\n dCutPeakN, dCutBkgN = 0, 0\n\n for name, g in dfg:\n valsFull = g['trapENFCal1'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values\n\n valsCut = g['trapENFCal1'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>=eMin) & (g['trapENFCal1']<=eMax)].values\n if name in enrDetList:\n dFullPeakE += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakE += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgE += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgE += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n elif name in natDetList:\n dFullPeakN += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakN += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgN += 
len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgN += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n\n return dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN", "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def iterated_array_from(\r\n self, func: Callable, cls: object, array_lower_sub_2d: Array2D\r\n ) -> Array2D:\r\n\r\n if not np.any(array_lower_sub_2d):\r\n return array_lower_sub_2d.slim\r\n\r\n iterated_array = np.zeros(shape=self.shape_native)\r\n\r\n threshold_mask_lower_sub = self.mask\r\n\r\n for sub_size in self.sub_steps[:-1]:\r\n array_higher_sub = self.array_at_sub_size_from(\r\n func=func, cls=cls, mask=threshold_mask_lower_sub, sub_size=sub_size\r\n )\r\n\r\n try:\r\n threshold_mask_higher_sub = self.threshold_mask_via_arrays_from(\r\n array_lower_sub_2d=array_lower_sub_2d,\r\n array_higher_sub_2d=array_higher_sub,\r\n )\r\n\r\n iterated_array = self.iterated_array_jit_from(\r\n iterated_array=iterated_array,\r\n threshold_mask_higher_sub=threshold_mask_higher_sub,\r\n threshold_mask_lower_sub=threshold_mask_lower_sub,\r\n array_higher_sub_2d=array_higher_sub,\r\n )\r\n\r\n except ZeroDivisionError:\r\n return self.return_iterated_array_result(iterated_array=iterated_array)\r\n\r\n if threshold_mask_higher_sub.is_all_true:\r\n return self.return_iterated_array_result(iterated_array=iterated_array)\r\n\r\n array_lower_sub_2d = array_higher_sub\r\n threshold_mask_lower_sub = threshold_mask_higher_sub\r\n\r\n array_higher_sub = self.array_at_sub_size_from(\r\n func=func,\r\n cls=cls,\r\n mask=threshold_mask_lower_sub,\r\n sub_size=self.sub_steps[-1],\r\n )\r\n\r\n iterated_array_2d = iterated_array + array_higher_sub.binned.native\r\n\r\n return self.return_iterated_array_result(iterated_array=iterated_array_2d)", "def metrics(img_gt, img_pred, voxel_size):\n\n if img_gt.ndim != img_pred.ndim:\n raise ValueError(\"The arrays 'img_gt' and 'img_pred' should have the \"\n \"same dimension, {} against {}\".format(img_gt.ndim,\n img_pred.ndim))\n\n res = []\n # 
Loop on each classes of the input images\n for c in [3, 1, 2]:\n # Copy the gt image to not alterate the input\n gt_c_i = np.copy(img_gt)\n gt_c_i[gt_c_i != c] = 0\n\n # Copy the pred image to not alterate the input\n pred_c_i = np.copy(img_pred)\n pred_c_i[pred_c_i != c] = 0\n\n # Clip the value to compute the volumes\n gt_c_i = np.clip(gt_c_i, 0, 1)\n pred_c_i = np.clip(pred_c_i, 0, 1)\n\n # Compute the Dice\n dice = dc(gt_c_i, pred_c_i)\n\n # Compute volume\n volpred = pred_c_i.sum() * np.prod(voxel_size) / 1000.\n volgt = gt_c_i.sum() * np.prod(voxel_size) / 1000.\n\n res += [dice, volpred, volpred-volgt]\n\n return res", "def distribution_cut(self, timestamp, window, slo_config):\n conf = slo_config['backend']\n measurement = conf['measurement']\n filter_valid = measurement['filter_valid']\n threshold_bucket = int(measurement['threshold_bucket'])\n good_below_threshold = measurement.get('good_below_threshold', True)\n\n # Query 'valid' events\n series = self.query(timestamp=timestamp,\n window=window,\n filter=filter_valid)\n series = list(series)\n\n if not series:\n return (0, 0) # no timeseries\n\n distribution_value = series[0].points[0].value.distribution_value\n # bucket_options = distribution_value.bucket_options\n bucket_counts = distribution_value.bucket_counts\n valid_events_count = distribution_value.count\n # growth_factor = bucket_options.exponential_buckets.growth_factor\n # scale = bucket_options.exponential_buckets.scale\n\n # Explicit the exponential distribution result\n count_sum = 0\n distribution = OrderedDict()\n for i, bucket_count in enumerate(bucket_counts):\n count_sum += bucket_count\n # upper_bound = scale * math.pow(growth_factor, i)\n distribution[i] = {\n # 'upper_bound': upper_bound,\n # 'bucket_count': bucket_count,\n 'count_sum': count_sum\n }\n LOGGER.debug(pprint.pformat(distribution))\n\n if len(distribution) - 1 < threshold_bucket:\n # maximum measured metric is below the cut after bucket number\n lower_events_count = valid_events_count\n upper_events_count = 0\n else:\n lower_events_count = distribution[threshold_bucket]['count_sum']\n upper_events_count = valid_events_count - lower_events_count\n\n if good_below_threshold:\n good_event_count = lower_events_count\n bad_event_count = upper_events_count\n else:\n good_event_count = upper_events_count\n bad_event_count = lower_events_count\n\n return (good_event_count, bad_event_count)", "def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets", "def get_discrete_split_value(arr: np.ndarray, y: np.ndarray, eval_func: Callable):\n\n # First element is the 
weighted average eval_func of the split\n # Second term is the intrinsic value to penalize many splits.\n return (\n sum(\n [\n eval_func(y[arr == value]) * np.sum(arr == value) / len(y)\n for value in set(arr)\n ]\n ),\n -1\n * sum(\n [\n pipe(\n np.sum(arr == value) / len(y),\n lambda ratio: ratio * np.log(ratio),\n )\n for value in set(arr)\n ]\n ),\n )", "def conceptcover(bin_arr, limit=1, uncovered=0.1):\n arr = np.copy(bin_arr)\n arr_sum = np.sum(arr)\n result = []\n while True:\n k = kernel(arr)\n i = intent(bin_arr, k)\n e = extent(bin_arr, i)\n if len(e)*len(i) < limit or (e, i) in result: break\n result.append((e, i))\n arr = removed(arr, e, i)\n if np.sum(arr)/arr_sum < uncovered: break\n return result", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n return np.array([segment[0] + i / n_bins * (segment[1] - segment[0])\n for i in range(n_bins)]\n + [float(segment[1])])", "def _computeValueFunction(self, nbDims, low, high, retstep=False):\n # algorithms performing in discrete space will have a discrete\n # value function that cannot be evaluated at any point - only on the\n # ones for which they have been setup based on the problem it has been\n # setup to solve\n def __round(vec):\n return tuple(int(x) for x in vec)\n\n def __notround(vec):\n return vec\n\n _round = __notround\n if self._algo.DOMAIN['state'] == Spaces.Discrete:\n _round = __round\n\n allParams, stepSizes = self._discretizer.discretize(retstep=True)\n\n allActions = self._problem.getActionsList()\n reducer = max if self.reducer == 'max' else mean\n\n # returns a list\n data = [\n utils.extends({\n key: state[k]\n for k, key in enumerate(self.getKeys(nbDims))\n }, z=reducer([\n self._algo.actionValue(_round(state), action)\n for action in allActions]))\n for state in allParams\n ]\n if retstep:\n return data, stepSizes\n return data", "def get_split_goodness_fit_continuous (\n feature_array: np.ndarray, target_array: np.ndarray, split: float, evaluate_function: Callable\n ):\n # Get above and below the split value\n above = feature_array >= split\n below = feature_array < split\n\n # Get weighted average evaluate_function on the splits\n n_above = np.sum ( above )\n above_eval = (\n evaluate_function ( target_array [ above ] ) * n_above / len ( target_array )\n ) # Weight = frac points in above\n below_eval = (\n evaluate_function ( target_array [ below ] ) * ( len ( target_array ) - n_above ) / len ( target_array )\n ) # Weight = frac points not in above\n\n # returns weighted sum of evaluate_function across splits & the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum (\n map (\n lambda x: x * np.log ( x ),\n [ n_above / len ( target_array ), ( len ( target_array ) - n_above ) / len ( target_array ) ],\n )\n ),\n ) # End get_split_goodness_fit_continuous", "def get_split_goodness_fit_continuous(\n arr: np.ndarray, y: np.ndarray, split: float, eval_func: Callable\n ):\n # Get above and below the split value\n above = arr >= split\n below = arr < split\n\n # get weighted average eval_func on the splits\n n_above = np.sum(above)\n above_eval = (\n eval_func(y[above]) * n_above / 
len(y)\n ) # weight = frac points in above\n below_eval = (\n eval_func(y[below]) * (len(y) - n_above) / len(y)\n ) # weight = frac points not in above\n\n # returns weighted sum of eval_func across splits, and the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum(\n map(\n lambda x: x * np.log(x),\n [n_above / len(y), (len(y) - n_above) / len(y)],\n )\n ),\n )", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def calc_features(self, instance):\n results = []\n\n for attribute, function in FeatureCalcerMeanCtr.FUNCTIONS:\n attribute_value = function(instance, attribute)\n if attribute_value not in self.attr2ctr[attribute]:\n results.append(self.mean_ctr)\n results.append(self.mean_ctr)\n results.append(0)\n continue\n clicks, impressions = self.attr2ctr[attribute][attribute_value]\n results.append(self.calc_ctr(clicks, impressions))\n results.append(self.calc_ctr(clicks, impressions, 0.08, 75))\n results.append(impressions)\n return results", "def batch_eval(f, pts):\n\n\t# Use this array to send into FEniCS.\n\tout = np.zeros(1)\n\n\tdef gen():\n\t\tfor pt in pts.reshape(2, -1).T:\n\t\t\tf.eval(out, pt)\n\t\t\tyield out[0]\n\n\tvalues = list(gen())\n\tavalues = np.array(values).reshape(pts.shape[1:])\n\treturn avalues", "def _iterate_over_factors(self, func, args):\n # TODO The user may prefer to provide the arguments as lists and receive them as\n # TODO lists, as this may be the form in which they are available. This should\n # TODO be allowed, rather than packing and unpacking them repeatedly.\n args_list, numerical_args = self._validate_and_prepare_args_for_iteration(args)\n\n out = [\n self._get_method(self.factors[i], func, args_list[i], numerical_args)\n for i in range(len(self.factors))\n ]\n if self._pool_outputs:\n return self._pool_outputs_from_function(out)\n return out", "def _extract(self, n_elements, n_warmup_functions,\n counters, threshold, n_extractions, decay):\n input_list = np.arange(n_elements)\n\n pre_exponentiation_list = np.arange(n_elements)\n # zero the already extracted values\n inidices_not_to_extract = tuple(np.where(counters >= threshold))\n pre_exponentiation_list[inidices_not_to_extract] = 0\n #print('pre_exponentiation_list after zero: ', pre_exponentiation_list[:40])\n # rescale values so that the smallest is 1\n # get the smallest (non-zero!) 
value\n # bring 0s artificially to max value\n pre_exponentiation_list[inidices_not_to_extract] = np.max(pre_exponentiation_list)\n min_value = np.min(pre_exponentiation_list)\n pre_exponentiation_list = pre_exponentiation_list - (min_value - 1)\n # bring the already extracted back to 0 (since we just got them negative)\n pre_exponentiation_list[inidices_not_to_extract] = 0\n #print('pre_exponentiation_list after min subtract: ', pre_exponentiation_list[:40])\n\n # create probabilities exponential decay\n # so that it is more probable to select elements from the head\n # esponential decay y = a * (1 - b) * x\n max_prob = 1\n exp_list = np.array([max_prob * ((1 - decay)**e) for e in pre_exponentiation_list])\n\n # remove elements that have counter above thresholds\n # aka we already extracted them the required number of times\n inidices_not_to_extract = tuple(np.where(counters >= threshold))\n exp_list[inidices_not_to_extract] = 0\n\n total_value = sum(exp_list)\n probab_list = np.array([e/total_value for e in exp_list])\n logger.debug('Eponential probabilities (after normalization)')\n logger.debug(probab_list)\n # extract indices\n logger.debug('non zero: ' + str(np.count_nonzero(probab_list)))\n extracted_indices = \\\n np.random.choice(input_list, n_extractions + n_warmup_functions,\n p=probab_list, replace=False)\n logger.debug(extracted_indices)\n indices_to_consider = extracted_indices[:n_extractions]\n logger.debug(indices_to_consider)\n # update counters\n counters[indices_to_consider] = counters[indices_to_consider] + 1\n # reorder so that the wormup are at the beginning\n warmup_first_extracted_indices = extracted_indices[n_extractions:]\n warmup_first_extracted_indices = \\\n np.concatenate((extracted_indices[-n_warmup_functions:], extracted_indices[:n_extractions]))\n logger.info(warmup_first_extracted_indices)\n return warmup_first_extracted_indices, counters", "def compute_iterations(self):\n\n nb_iter = min([len(a.ysec_iter) for a in self], 0)\n # syncronize all iterations to a single one\n for oneresult in self:\n oneresult.change_iterations_number(nb_iter)\n \n # compute value error for each iteration\n for i in range(nb_iter):\n value = [one.ysec_iter[i] for one in self]\n error = [one.yerr_iter[i]**2 for one in self]\n \n # store the value for the iteration\n self.ysec_iter.append(sum(value))\n self.yerr_iter.append(math.sqrt(sum(error)))", "def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res", "def fcn(self, data_in):\n \n assert isinstance(data_in, _np.ndarray), 'Required input is an ndarray'\n\n assert data_in.ndim == 1, 'Required input is a 1D ndarray'\n \n data_out = 0*data_in\n\n cutter = CutEveryNSpectra(self.parameters['offset'], cut_m=self.parameters['cut_m'],\n every_n=self.parameters['every_n'], action=self.parameters['action'])\n\n # Because of the limits of PlotEffect, the input and output data HAS TO BE the same size\n temp = cutter.calculate(_np.repeat(data_in[:,None], 11, axis=-1)).sum(axis=-1)\n data_out[:temp.size] = temp\n \n return data_out", "def evaluate(self, test_data, split=2):\n size = int(len(test_data) / split)\n mini_batch_split = np.arange(size, len(test_data), size)\n mini_batches = np.split(test_data, mini_batch_split)\n 
total = 0\n\n for mini_batch in mini_batches:\n xs, ys = np.array(mini_batch).T\n xs = cp.array(cp.vstack(xs).astype(np.float64).reshape((-1, self.sizes[0], 1)))\n ys = cp.array(ys.astype(np.int64))\n total += cp.sum(cp.argmax(self.feedforward(xs), axis=(1, 2)) == ys)\n return total", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def roccurve(signals, bkgs, cut_function, cut_values):\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)\n return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg", "def feature_processing(array2d):\n new_array2d = np.zeros([array2d.shape[0], 29])\n # items/ orders\n new_array2d[:, 0] = array2d[:, 4] / array2d[:, 3]\n # cancels / orders\n new_array2d[:, 1] = array2d[:, 5] / array2d[:, 3]\n # returns / items\n new_array2d[:, 2] = array2d[:, 6] / array2d[:, 4]\n # voucher / orders\n new_array2d[:, 3] = array2d[:, 10] / array2d[:, 3]\n # female_items / female_items + male_items\n new_array2d[:, 4] = array2d[:, 15] / ([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # male_items / female_items + male_items\n new_array2d[:, 5] = array2d[:, 16] / ([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # unisex_items / items\n new_array2d[:, 6] = array2d[:, 17] / array2d[:, 4]\n # wapp_items / items\n new_array2d[:, 7] = array2d[:, 18] / array2d[:, 4]\n # wftw_items / items\n new_array2d[:, 8] = array2d[:, 19] / array2d[:, 4]\n # mapp_items / items\n new_array2d[:, 9] = array2d[:, 20] / array2d[:, 4]\n # wacc_items / items\n new_array2d[:, 10] = array2d[:, 21] / array2d[:, 4]\n # macc_items / items\n new_array2d[:, 11] = array2d[:, 22] / array2d[:, 4]\n # mftw_items / items\n new_array2d[:, 12] = array2d[:, 23] / array2d[:, 4]\n # wspt_items / items\n new_array2d[:, 13] = array2d[:, 24] / array2d[:, 4]\n # mspt_items / items\n new_array2d[:, 14] = array2d[:, 25] / array2d[:, 4]\n # curvy_items / items\n # Curvy item has a strong correlation with gender, however they are very right-skewed use np.power(1/6) to smooth it\n new_array2d[:, 15] = np.power(array2d[:, 26] / array2d[:, 4], 1 / 6)\n # sacc_items / items\n new_array2d[:, 16] = array2d[:, 27] / array2d[:, 4]\n # msite_orders / orders\n new_array2d[:, 17] = array2d[:, 28] / array2d[:, 3]\n # desktop_orders / orders\n new_array2d[:, 18] = array2d[:, 29] / array2d[:, 3]\n # android_orders / orders\n new_array2d[:, 19] = array2d[:, 30] / array2d[:, 3]\n # ios_orders / orders\n new_array2d[:, 20] = array2d[:, 31] / array2d[:, 3]\n # other_device_orders / orders\n new_array2d[:, 21] = array2d[:, 32] / array2d[:, 3]\n # work_orders / orders\n new_array2d[:, 22] = array2d[:, 33] / array2d[:, 3]\n # home_orders / orders\n new_array2d[:, 23] = array2d[:, 34] / array2d[:, 3]\n # parcelpoint_orders / orders\n new_array2d[:, 24] = array2d[:, 35] / array2d[:, 3]\n # other_collection_orders / orders\n new_array2d[:, 25] = array2d[:, 36] / array2d[:, 3]\n # average_discount_onoffer\n new_array2d[:, 26] = array2d[:, 39]\n # average_discount_used\n new_array2d[:, 27] = array2d[:, 40]\n # revenue / order\n new_array2d[:, 28] = array2d[:, 41] 
/ array2d[:, 3]\n\n # normalize by each feature\n new_array2d = normalize(new_array2d, axis=0, norm='max')\n return new_array2d", "def game_function(\n game, function, num_resamples, num_returned, *, percentiles=None, processes=None\n):\n results = np.empty((num_resamples, num_returned))\n\n chunksize = num_resamples if processes == 1 else 4\n with multiprocessing.Pool(processes) as pool:\n for i, res in enumerate(\n pool.imap_unordered(\n functools.partial(_resample_function, function, game),\n range(num_resamples),\n chunksize=chunksize,\n )\n ):\n results[i] = res\n\n if percentiles is None: # pylint: disable=no-else-return\n results.sort(0)\n return results.T\n else:\n return np.percentile(results, percentiles, 0).T", "def get_bc_array_for_all_frequencies(self, loaded_table, boundary_condition):\n if self.frequencies is None:\n number_frequencies = 1\n else:\n number_frequencies = len(self.frequencies)\n\n if loaded_table:\n list_arrays = [np.zeros(number_frequencies, dtype=float) if bc is None else bc[0:number_frequencies] for bc in boundary_condition]\n self.no_table = False\n else:\n list_arrays = [np.zeros(number_frequencies, dtype=float) if bc is None else np.ones(number_frequencies, dtype=float)*bc for bc in boundary_condition]\n\n return list_arrays", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n errors = []\n cleaned_errors = []\n\n ### your code goes here\n count = 0\n for p in predictions:\n errors.append((net_worths[count] - p) * (net_worths[count] - p))\n count = count + 1\n \n sorted_errors = sorted(errors)\n outlier_definer = sorted_errors[80]\n\n cleaned_net_worths = net_worths[errors < outlier_definer]\n cleaned_ages = ages[errors < outlier_definer]\n cleaned_predictions = predictions[errors < outlier_definer]\n\n print(len(cleaned_net_worths))\n\n count = 0\n for p in cleaned_predictions:\n cleaned_errors.append((cleaned_net_worths[count] - p) * (cleaned_net_worths[count] - p))\n count = count + 1\n\n cleaned_data = tuple(zip(cleaned_ages, cleaned_net_worths, cleaned_errors))\n \n return cleaned_data" ]
[ "0.56539094", "0.52569467", "0.5236992", "0.5231127", "0.5104325", "0.5093013", "0.5085143", "0.5064352", "0.4961732", "0.49305794", "0.49301794", "0.4917983", "0.48857465", "0.4866057", "0.48591626", "0.48007303", "0.47892055", "0.47815204", "0.4772867", "0.47728154", "0.4765887", "0.47539708", "0.47531417", "0.47531208", "0.47527468", "0.47428644", "0.47381568", "0.47340143", "0.47275987", "0.47232923" ]
0.77393055
0
Expects a list of signals and a list of bkgs (Dataset objects), plus a cut_function and cut_values.
def roccurve(signals, bkgs, cut_function, cut_values): eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values) eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values) return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def get_events_passing_cuts(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs): \n\n try:\n kwargs[\"weight_dir\"]\n except KeyError:\n sys.exit()\n\n #Get heat _fraction\n heat_fraction = kwargs[\"classifier_name\"][13:]\n\n #Get scaling dict to set the weights\n d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)\n\n d_event_dir = {\"S1Pb\":\"Beta_and_Pb\", \"S2Pb\":\"Beta_and_Pb\", \"S1Beta\":\"Beta_and_Pb\", \"S2Beta\":\"Beta_and_Pb\",\n \"S1Gamma\":\"Gamma\", \"S2Gamma\":\"Gamma\", \"FidGamma\":\"Gamma\", \n \"heatonly_heat_fraction\" + heat_fraction: \"Heatonly\", \"WIMP_mass_\" + str(WIMP_mass): \"WIMP\"}\n key_heat = \"heatonly_heat_fraction\" + heat_fraction\n\n #Load data\n d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit = 1)\n\n # Get classifier\n model_dir = script_utils.create_directory(\"../../Classifier_files/\" + bolo_name + \"/\" + analysis_type + \"/\"+ kwargs[\"weight_dir\"] + \"/\") \n if kwargs.has_key(\"classifier_name\"):\n modelfile = model_dir + \"xgboost_classifier_mass_\" + str(WIMP_mass) + \"_\" + kwargs[\"classifier_name\"] + \".model\"\n bst = xgb.Booster({'nthread':16}, model_file = modelfile)\n\n #Get predictions on test sample\n d_pred = {}\n d_hist = {}\n d_color = {\"S1Pb\":kOrange-8, \"S2Pb\":kOrange-9, \"S1Beta\":kGreen+2, \"S2Beta\":kGreen-3,\n \"S1Gamma\":kBlue-7, \"S2Gamma\":kBlue, \"FidGamma\":kAzure+10, key_heat: kRed, \"WIMP_mass_\" + str(WIMP_mass):kGray, \"neutron\":kMagenta}\n\n #ROOT out_dir \n root_dir = script_utils.create_directory(\"./ROOT_files/\" + bolo_name + \"/\" + analysis_type + \"/\")\n file_root = TFile(root_dir + bolo_name + \"_sensi_eff_curves_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".root\", \"read\")\n\n #Write events that pass cut to a file \n txt_dir = script_utils.create_directory(\"./Text_files/Simulated_sensitivity/\")\n with open(txt_dir + 
\"/simulated_events_passing_cut_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".txt\", \"w\") as fout:\n\n fout.write(\"heat_fraction,exposure,num_events_passing_cut\\n\")\n\n #Loop over possible exposure values\n for exposure in [10, 50, 100, 500]:\n script_utils.print_utility(\"Getting events passing cut for exposure of \" + str(exposure) + \" mass of \" + str(WIMP_mass))\n for event_type in d_test.keys():\n d_pred[event_type] = bst.predict( xgb.DMatrix(d_test[event_type].iloc[:,:-3].values) )\n d_hist[event_type] = TH1F(\"h\" + event_type + str(exposure), \"h\" + event_type + str(exposure), bin_X, min_X, max_X)\n PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])\n PyRPl.process_TH1(d_hist[event_type], use_fill_bool = True, color = d_color[event_type] )\n if \"WIMP\" not in event_type:\n d_hist[event_type].Scale(float(d_scaling[\"prop_\" + event_type])*float(d_scaling[\"exp_per_day\"])*exposure/float(d_hist[event_type].Integral()))\n else:\n d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Scale(1./d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n\n list_hist_bckg =[d_hist[\"S1Pb\"], d_hist[\"S2Pb\"], d_hist[\"S1Beta\"], d_hist[\"S2Beta\"], d_hist[\"S1Gamma\"], d_hist[\"S2Gamma\"], d_hist[\"FidGamma\"], d_hist[key_heat]]\n\n hsum_bckg=TH1F(\"hsum_bckg\" + str(exposure),\"hsum_bckg\" + str(exposure), bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])\n hsum_bckg.SetBinContent(i, sumcontent)\n\n fsensi = file_root.Get(\"sensitivity_expo_\" + str(exposure))\n cut_val = fsensi.GetMinimumX(2,10)\n\n #Run Poisson simulations\n list_event_pass_cut=[]\n for nsimu in range(100):\n hdatasimu = TH1F(\"hdatasimu\",\"hdatasimu\", bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n hdatasimu.SetBinContent(i, np.random.poisson(hsum_bckg.GetBinContent(i)))\n bin_cut = hdatasimu.FindBin(cut_val)\n num_entry_cut = int(hdatasimu.Integral(bin_cut, max_X))\n list_event_pass_cut.append(str(num_entry_cut))\n del hdatasimu\n fout.write(heat_fraction[1:] + \",\" + str(exposure) + \",\" + \",\".join(list_event_pass_cut) + \"\\n\")", "def cut_eval(self, hits, *args):\n end = self.start_offset + self.train_window + self.predict_window\n return self.cut(hits, self.start_offset, end) + args", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for 
potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def get_fidcuts():\n return combine_cuts([fid_cuts('muN_pt', 'muN_eta'),\n fid_cuts('muP_pt', 'muP_eta')])", "def bessel_bandpass_filter(data, lowcut, highcut, fs, order=2):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n # bessel() and lfilter() are from scipy.signal\n\n b, a = bessel(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def cutflow(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n masksonecut, maskscutflow = [], []\n for i, cut in enumerate(names):\n mask1 = self.any(cut)\n mask2 = self.all(*(names[: i + 1]))\n masksonecut.append(mask1)\n maskscutflow.append(mask2)\n\n if not self.delayed_mode:\n nevonecut = [len(self._data)]\n nevcutflow = [len(self._data)]\n nevonecut.extend(numpy.sum(masksonecut, axis=1))\n nevcutflow.extend(numpy.sum(maskscutflow, axis=1))\n\n else:\n nevonecut = [dask_awkward.count(self._data, axis=0)]\n nevcutflow = [dask_awkward.count(self._data, axis=0)]\n nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])\n nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])\n\n return Cutflow(\n names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode\n )", "def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, 
z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target", "def filt_bp(sig: np.ndarray, Ss: int, Cfs0: int, Cfs1: None,\n order=5) -> np.ndarray:\n nyq = 0.5 * Ss\n normal_cutoff1 = Cfs0 / nyq\n normal_cutoff2 = Cfs1 / nyq\n b, a = butter(order, (normal_cutoff1, normal_cutoff2),\n btype='band',\n analog=False)\n return lfilter(b, a, sig)", "def butter_bp_coe(lowcut, highcut, fs, order=1):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a", "def ANN_binned_tagged_jets_hist(datalist, model, discriminant_cuts, CSV_cuts, bins, nbins, mode=\"pT_jet\",Save=False,addFeature=False):\n title = \"binned_tagged_jets_vs_\"+mode\n\tdiscriminant = \"ANN\"\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n print \"working on\",datatitle\n ran = data[4]\n\t\tCSV = data[2]\n\t\tpT = data[1]\n\t\tx_data = data[0]\n AllJetsHistlist.append(rt.TH1D(datatitle+\"_AllJets\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(datatitle+\"_CSV\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(datatitle+\"_Discriminant\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n\t\n\t\tif addFeature == False:\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\t\telif addFeature == \"pT\":\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\t\telif addFeature == \"PV\":\n\t\t\tassert x_data.shape[1] == 21, \"wrong x_data format: PV cannot be found\"\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\t\telse:\n\t\t\tprint \"invalid feature input\"\n\t\t\treturn None\n\t\tbin_numbers = ANN_bin_selection(pT,bins)\n\n\t for i,pT_value in enumerate(pT):\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(pT_value)\n\t if pred_y[i] >= discriminant_cuts[bin_numbers[i]]: DiscriminantHistlist[n].Fill(pT_value)\n\t if CSV[i] >= CSV_cuts[bin_numbers[i]]: CSVHistlist[n].Fill(pT_value)\n\n canvaslist = []\n 
legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n canvaslist.append(rt.TCanvas(datatitle+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(datatitle+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+datatitle+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def targetFromSignals(obars, nbands=3, amount=1, targetprofit=15., stoploss=45.):\n # bandsg, yband, ask, bid, day, amount, targetprofit, stoploss\n bars = obars.copy()\n for j in range(nbands): # for each band traverse it\n ibandsg = bars.columns.get_loc('bandsg'+str(j))\n # being pessimistic ... right\n ybandsell = traverseSellBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n ybandbuy = traverseBuyBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n bars['y'+str(j)] = mergebandsignals(ybandsell, ybandbuy)\n\n return bars", "def runCutVals(df, eVal=0., windowSize = 2):\n\n dfg = df.groupby(['cpd1'])\n\n eMin = round(eVal - windowSize/2, 2)\n eMax = round(eMin + windowSize, 2)\n dFullPeakE, dFullBkgE = 0, 0\n dCutPeakE, dCutBkgE = 0, 0\n dFullPeakN, dFullBkgN = 0, 0\n dCutPeakN, dCutBkgN = 0, 0\n\n for name, g in dfg:\n valsFull = g['trapENFCal1'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values\n\n valsCut = g['trapENFCal1'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>=eMin) & (g['trapENFCal1']<=eMax)].values\n if name in enrDetList:\n dFullPeakE += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakE += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgE += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgE += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n elif name in natDetList:\n dFullPeakN += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakN += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgN += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgN += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n\n return dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def __init__(self, *args):\n 
_BRepAlgo.BRepAlgo_Cut_swiginit(self,_BRepAlgo.new_BRepAlgo_Cut(*args))", "def efficient_binned_tagged_jets_hist(datalist,discriminant, discriminant_cuts, CSV_cuts, bins, nbins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"binned_tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cuts[bin_number]: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[17] != 0:\n L = particle[20]/float(particle[17])\n else:\n continue\n if L >= discriminant_cuts[bin_number]: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(\"jet p_{T} (GeV)\")\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def cut(S, T, graph):\n ###TODO\n pass", "def butter_bandstop_filter(data, lowcut, highcut, fs, order):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n i, u = sg.butter(order, (low, high), btype='bandstop')\n y = sg.filtfilt(i, u, data)\n return y", "def get_data_and_cuts(args):\n\n if args['verbose'] >= 2:\n print(\"Load data\\n\", flush=True)\n data = get_dataset(args)\n\n if args['verbose'] >= 2:\n print(\"Find cuts\", flush=True)\n cuts = get_cuts(data, args, verbose=args['verbose'])\n if args['verbose'] >= 2:\n print(f'\\tI found {len(cuts.values)} cuts\\n')\n\n print(\"Compute cost\", flush=True)\n cost_function = get_cost_function(data, args)\n cuts = compute_cost_and_order_cuts(cuts, cost_function)\n\n cuts = pick_cuts_up_to_order(cuts,\n percentile=args['experiment']['percentile_orders'])\n if args['verbose'] >= 2:\n max_considered_order = cuts.costs[-1]\n print(f\"\\tI will stop at order: {max_considered_order}\")\n print(f'\\tI will use {len(cuts.values)} cuts\\n', 
flush=True)\n\n if args['plot']['cuts']:\n if args['verbose'] >= 2:\n print(f\"\\tPlotting cuts\")\n\n plot_cuts(data, cuts,\n nb_cuts_to_plot=args['plot']['nb_cuts'],\n path=args['plot_dir'])\n\n return data, cuts", "def place(self, sig, bg_x, bg_y, cut_1_range, cut_2_range):\n assert bg_x.shape == bg_y.shape\n npts_1, npts_2 = bg_x.shape\n\n c1_bin_bounds = np.linspace(*cut_1_range, num=(npts_1 + 1))\n c1_bin = np.digitize([self._cut_1], c1_bin_bounds) - 1\n\n c2_bin_bounds = np.linspace(*cut_2_range, num=(npts_2 + 1))\n c2_bin = np.digitize([self._cut_2], c2_bin_bounds) - 1\n\n if any(b < 0 for b in [c1_bin, c2_bin]): \n raise ValueError(\"can't put a cut in the underflow bin\")\n \n eff = float(sig[c1_bin, c2_bin] / sig.max())\n\n def get_rej(bkg_array): \n array_val = bkg_array.max() / bkg_array[c1_bin, c2_bin]\n return float(array_val)\n rej_x, rej_y = [get_rej(ar) for ar in [bg_x, bg_y]]\n\n self._xyz = rej_x, rej_y, eff\n self._cut_ranges = (cut_1_range, cut_2_range)", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def cut(\n self,\n bins,\n **kwargs,\n ):\n\n def squeeze_and_cut(df, *args, **kwargs):\n # We need this function to ensure we squeeze our internal\n # representation (a dataframe) to a Series.\n series = df.squeeze(axis=1)\n return pandas.cut(series, *args, **kwargs)\n\n # We use `default_to_pandas` here since the type and number of\n # results can change depending on the input arguments.\n return self.default_to_pandas(squeeze_and_cut, bins, **kwargs)", "def callback_freq_cut(val):\n global plot_mode\n global idx_freq\n last_plot_mode = plot_mode\n plot_mode = 'freq_cut'\n# print( 'scale_freq', scale_freq)\n idx_freq = freq_to_idx( val, scale_freq )\n val_freq = idx_freq * scale_freq\n# print( 'val idx_freq val_freq', val, idx_freq, val_freq )\n update_num_shadow(int(sld['neighbors'].val))\n #plot 121\n lcutfreq.set_ydata( [val_freq, val_freq])\n lcuttime.set_alpha( 0.0 )\n lcutfreq.set_alpha( alpha_hm )\n #plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_freq )\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True])\n replot_light()\n reform_axis()\n \n fig.canvas.draw_idle()", "def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets", "def reformat_cuts(input_cuts):\n output_cuts = []\n for cut in input_cuts:\n cut = 
list(cut)\n if cut[1]==None:\n cut[1]=float(\"-inf\")\n if cut[2]==None:\n cut[2]=float(\"inf\")\n cut = tuple(cut)\n output_cuts.append(cut)\n return output_cuts", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def get_sensi_eff_curves_various_exp(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs): \n\n try:\n kwargs[\"weight_dir\"]\n except KeyError:\n sys.exit()\n\n #Get heat _fraction\n heat_fraction = kwargs[\"classifier_name\"][13:]\n\n #Get scaling dict to set the weights\n d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)\n # print d_scaling\n\n d_event_dir = {\"S1Pb\":\"Beta_and_Pb\", \"S2Pb\":\"Beta_and_Pb\", \"S1Beta\":\"Beta_and_Pb\", \"S2Beta\":\"Beta_and_Pb\",\n \"S1Gamma\":\"Gamma\", \"S2Gamma\":\"Gamma\", \"FidGamma\":\"Gamma\", \n \"heatonly_heat_fraction\" + heat_fraction: \"Heatonly\", \"WIMP_mass_\" + str(WIMP_mass): \"WIMP\"}\n key_heat = \"heatonly_heat_fraction\" + heat_fraction\n\n #Load data\n d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit = 1)\n\n # Get classifier\n model_dir = script_utils.create_directory(\"../../Classifier_files/\" + bolo_name + \"/\" + analysis_type + \"/\"+ kwargs[\"weight_dir\"] + \"/\") \n if kwargs.has_key(\"classifier_name\"):\n modelfile = model_dir + \"xgboost_classifier_mass_\" + str(WIMP_mass) + \"_\" + kwargs[\"classifier_name\"] + \".model\"\n bst = xgb.Booster({'nthread':16}, model_file = modelfile)\n\n #Get predictions on test sample\n d_pred = {}\n d_hist = {}\n d_color = {\"S1Pb\":kOrange-8, \"S2Pb\":kOrange-9, \"S1Beta\":kGreen+2, \"S2Beta\":kGreen-3,\n \"S1Gamma\":kBlue-7, \"S2Gamma\":kBlue, \"FidGamma\":kAzure+10, key_heat: kRed, \"WIMP_mass_\" + str(WIMP_mass):kGray, \"neutron\":kMagenta}\n\n #ROOT out_dir \n root_dir = script_utils.create_directory(\"./ROOT_files/\" + bolo_name + \"/\" + analysis_type + \"/\")\n file_root = TFile(root_dir + bolo_name + \"_sensi_eff_curves_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".root\", \"recreate\")\n\n #Loop over possible exposure values\n # for exposure in [10, 50, 100, 500]:\n for exposure in [66]:\n script_utils.print_utility(\"Getting sensi + eff for exposure of \" + str(exposure) + \" mass of \" + str(WIMP_mass))\n for event_type in d_test.keys():\n d_pred[event_type] = bst.predict( xgb.DMatrix(d_test[event_type].iloc[:,:-3].values) )\n d_hist[event_type] = TH1F(\"h\" + event_type + 
str(exposure), \"h\" + event_type + str(exposure), bin_X, min_X, max_X)\n PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])\n PyRPl.process_TH1(d_hist[event_type], use_fill_bool = True, color = d_color[event_type] )\n if \"WIMP\" not in event_type:\n d_hist[event_type].Scale(float(d_scaling[\"prop_\" + event_type])*float(d_scaling[\"exp_per_day\"])*exposure/float(d_hist[event_type].Integral()))\n else:\n d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Scale(8000./d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n\n list_hist_bckg =[d_hist[\"S1Pb\"], d_hist[\"S2Pb\"], d_hist[\"S1Beta\"], d_hist[\"S2Beta\"], d_hist[\"S1Gamma\"], d_hist[\"S2Gamma\"], d_hist[\"FidGamma\"], d_hist[key_heat]]\n\n hsum_bckg=TH1F(\"hsum_bckg\" + str(exposure),\"hsum_bckg\" + str(exposure), bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])\n hsum_bckg.SetBinContent(i, sumcontent)\n\n # print hsum_bckg.Integral(hsum_bckg.FindBin(3.5), bin_X)\n # print d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(3.5), bin_X)/d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral()\n\n hs=THStack(\"hs\", \"hs\")\n for hist in list_hist_bckg + [d_hist[\"WIMP_mass_\" + str(WIMP_mass)]]:\n hs.Add(hist)\n\n # cc = TCanvas(\"cc\", \"cc\")\n # h1=TH1F(\"h1\",\"h1\", bin_X, min_X, max_X)\n # PyRPl.process_TH1(h1, X_title=\"BDT ouput\", min_Y = 1E-1, max_Y = 20000)\n \n # gPad.SetLogy()\n # h1.Draw()\n # hs.Draw(\"same\")\n # raw_input()\n\n class Sensitivity:\n def __call__( self, x, par ):\n\n bin_number_sig = d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(x[0])\n bin_number_bckg = hsum_bckg.FindBin(x[0])\n eff_sig = float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(bin_number_sig, bin_X))\n exp_bckg = hsum_bckg.Integral(bin_number_bckg, bin_X)\n\n vec_proba = [TMath.PoissonI(i, exp_bckg) for i in range(500)] \n lim_Poisson_bckg = np.sum(np.array([PoissonCL.compute_90CL_limit(i)*vec_proba[i] for i in range(500)]))\n\n if eff_sig<=0:\n return 1E10\n else:\n return lim_Poisson_bckg/eff_sig + par[0]\n\n class Signal_eff:\n def __call__( self, x, par ):\n\n bin_number = d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(x[0])\n integ = float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(bin_number, bin_X))/float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n return par[0] + integ\n\n h = TH1F(\"h\", \"h\",100, 0, 10)\n PyRPl.process_TH1(h, X_title = \"BDT cut\", Y_title = \"Sensitivity (a.u.)\")\n h.SetMinimum(1)\n h.SetMaximum(1E3)\n # h.Draw()\n\n fopt = TF1(\"sensitivity_expo_\" + str(exposure), Sensitivity(), 0,10, 1)\n fopt.SetParameter(0,0)\n fopt.SetNpx(100)\n # fopt.Draw(\"same\")\n\n fsig_eff = TF1(\"signal_eff_expo_\" + str(exposure), Signal_eff(), 0,10, 1)\n fsig_eff.SetParameter(0,0)\n fsig_eff.SetNpx(500)\n\n min_X = fopt.GetMinimumX(2,10)\n print \"signal eff\", fsig_eff.Eval(min_X)\n print \"bckg_exp\", hsum_bckg.Integral(hsum_bckg.FindBin(min_X), bin_X)\n\n # fopt.Write()\n # fsig_eff.Write()\n\n # gPad.SetLogy()\n # raw_input()\n # del h \n\n # file_root.Close()", "def geneffcut(energy, array, cutvals=hads, bins=BINS):\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = 0.\n binning[binning >= len(bins)-1] = 0.\n hadeffcut = np.zeros(len(energy), dtype=bool)\n for i, cutval in enumerate(cutvals):\n binmask = binning == i\n hadeffcut[binmask] = array[binmask] < cutval\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = -1\n binning[binning >= len(bins)-1] = -1\n 
hadeffcut[binning == -1] = 0\n\n return hadeffcut", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y" ]
[ "0.55629873", "0.5544333", "0.54654413", "0.53966707", "0.5293295", "0.5175403", "0.5154368", "0.50937045", "0.5059277", "0.5027049", "0.50096345", "0.49881732", "0.49795693", "0.4978986", "0.49496424", "0.4933744", "0.4921186", "0.49105307", "0.49013457", "0.48807377", "0.48686138", "0.48626393", "0.48209476", "0.4818179", "0.48062032", "0.47978446", "0.47949582", "0.4768826", "0.47638527", "0.47498536" ]
0.6186934
0
Basic plotting style for a single roccurve, based on multiple signal and bkg samples. Expects an ax object to be given; this function is not standalone.
def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):
    eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)
    return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
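The record above stores only this thin wrapper; roccurve and _draw_roccurve live elsewhere in the source package and their exact behavior is not shown here. As a rough, self-contained illustration of the calling pattern the query describes (an efficiency pair per cut value, drawn onto a caller-supplied ax), a sketch with toy scores might look like the following — the helper below is an assumption for illustration, not the package's actual implementation:

    import numpy as np
    import matplotlib.pyplot as plt

    def sketch_roccurve(sig_scores, bkg_scores, cut_values, ax):
        """Minimal stand-in for plot_roccurve: per-cut efficiencies drawn on a given ax."""
        # Efficiency = fraction of events passing each cut (assumed "score > cut" convention).
        eff_sig = [np.mean(sig_scores > c) for c in cut_values]
        eff_bkg = [np.mean(bkg_scores > c) for c in cut_values]
        # Background efficiency on x, signal efficiency on y, as in the plotting helpers above.
        line, = ax.plot(eff_bkg, eff_sig, marker='o')
        return line

    # Usage: the caller owns the figure and axes ("expects an ax object to be given").
    rng = np.random.default_rng(0)
    fig, ax = plt.subplots(figsize=(8, 8))
    line = sketch_roccurve(rng.normal(1.0, 1.0, 5000),   # toy signal scores
                           rng.normal(0.0, 1.0, 5000),   # toy background scores
                           np.linspace(-3, 3, 61), ax)
    line.set_label('toy bkg')
    ax.set_xlabel('Bkg eff.')
    ax.set_ylabel('Signal eff.')
    ax.legend()

The companion helpers listed in this record's negatives (e.g. plot_single_roccurve, plot_roccurves_per_bkg) wrap exactly this kind of call with the diagonal baseline, axis limits, labels, and legend.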
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, 
color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )", "def plotCurves(self, dataByModel):\n prFigure = pyplot.figure()\n self.configChart()\n prAx = prFigure.add_subplot(111)\n prAx.set_xlabel('Recall')\n prAx.set_ylabel('Precision')\n prAx.set_title('PR Curve')\n prAx.grid(True)\n\n rocFigure = pyplot.figure()\n self.configChart()\n rocAx = rocFigure.add_subplot(111)\n rocAx.set_xlabel('Fallout / FPR')\n rocAx.set_ylabel('Recall')\n rocAx.set_title('ROC Curve')\n rocAx.grid(True)\n\n corrFigure = pyplot.figure()\n self.configChart()\n corrAx = corrFigure.add_subplot(111)\n corrAx.set_xlabel('predict score')\n corrAx.set_ylabel('real score')\n corrAx.set_title('Correlation Curve')\n corrAx.grid(True)\n\n precisionFigure = pyplot.figure()\n self.configChart()\n precisionAx = precisionFigure.add_subplot(111)\n precisionAx.set_xlabel('score')\n precisionAx.set_ylabel('Precision')\n precisionAx.set_title('Threshold score vs precision')\n precisionAx.grid(True)\n\n recallFigure = pyplot.figure()\n self.configChart()\n recallAx = recallFigure.add_subplot(111)\n recallAx.set_xlabel('score')\n recallAx.set_ylabel('Recall')\n recallAx.set_title('Threshold score vs recall')\n recallAx.grid(True)\n\n falloutFigure = pyplot.figure()\n self.configChart()\n falloutAx = falloutFigure.add_subplot(111)\n falloutAx.set_xlabel('score')\n falloutAx.set_ylabel('Fallout (False Positive Rate)')\n falloutAx.set_title('Threshold score vs fallout')\n falloutAx.grid(True)\n\n for (model, data) in list(dataByModel.items()):\n (recalls, precisions) = list(zip(*(data['PR'])))\n prAx.plot(recalls, precisions, marker='o', linestyle='--', label=model)\n\n (fallouts, recalls) = list(zip(*(data['ROC'])))\n rocAx.plot(fallouts, recalls, marker='o', linestyle='--', label=model)\n\n (pCtrs, eCtrs) = list(zip(*(data['CORR'])))\n corrAx.plot(pCtrs, eCtrs, label=model)\n\n (score, recall, precision, fallout) = list(zip(*(data['cutoff'])))\n\n recallAx.plot(score, recall, label=model + '_recall')\n precisionAx.plot(score, precision, label=model + '_precision')\n falloutAx.plot(score, fallout, label=model + '_fallout')\n\n # saving figures\n ensure_dir(self.output_dir)\n prAx.legend(loc='upper right', shadow=True)\n prFigure.savefig('%s/pr_curve.png' % self.output_dir)\n\n rocAx.legend(loc='lower right', shadow=True)\n rocFigure.savefig('%s/roc_curve.png' % self.output_dir)\n\n corrAx.legend(loc='upper left', shadow=True)\n corrFigure.savefig('%s/corr_curve.png' % self.output_dir)\n\n precisionAx.legend(loc='upper left', shadow=True)\n precisionFigure.savefig('%s/precision.png' % self.output_dir)\n\n recallAx.legend(loc='lower left', shadow=True)\n recallFigure.savefig('%s/recall.png' % self.output_dir)\n\n falloutAx.legend(loc='upper right', shadow=True)\n 
falloutFigure.savefig('%s/fallout.png' % self.output_dir)\n\n pyplot.close()\n pngs = '{result}/pr_curve.png {result}/roc_curve.png {result}/corr_curve.png {result}/precision.png {result}/recall.png {result}/fallout.png'.format(result=self.output_dir)\n print('png: ', pngs)", "def show_rgn(ax, rgn, **kwargs):\n \n alpha = 0.1\n #lw = 0.1\n \n if rgn['shape'] == 'box':\n ax.plot([rgn['params']['blcx']]*2, \n [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)\n ax.plot([rgn['params']['blcx'],rgn['params']['trcx']], \n [rgn['params']['blcy']]*2, 'r-', **kwargs)\n ax.plot([rgn['params']['blcx'],rgn['params']['trcx']], \n [rgn['params']['trcy']]*2, 'r-', **kwargs)\n ax.plot([rgn['params']['trcx']]*2, \n [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)\n \n elif rgn['shape'] == 'circle':\n patch = mpatches.Circle((rgn['params']['cx'], rgn['params']['cy']), \n rgn['params']['r'], alpha=alpha, transform=ax.transData)\n #plt.figure().artists.append(patch)\n ax.add_patch(patch)\n \n elif rgn['shape'] == 'polygon':\n for poly in rgn['params']['Polygons']:\n patch = mpatches.Polygon(poly.get_vertices(), closed=True, \n alpha=alpha, transform=ax.transData)\n ax.add_patch(patch)\n \n elif rgn['shape'] == 'pixel':\n ax.plot(region['params']['cy'], region['params']['cx'], 'rs', ms=5)", "def plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quant,\n plot=True):\n\n fs = 16\n\n fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}\n\n fprs['all'] = {}\n tprs['all'] = {}\n ctds['all'] = {}\n brss['all'] = {}\n \n for group in groups:\n\n fprs[group] = {}\n tprs[group] = {}\n ctds[group] = {}\n brss[group] = {}\n \n for fold in set(folds):\n \n ate = a[folds == fold]\n str_test = baseline_models.structure_for_eval_(t[folds == fold],\n e[folds == fold])\n \n if len(set(folds)) == 1:\n \n atr = ate\n str_train = str_test\n \n else:\n atr = a[folds != fold]\n str_train = baseline_models.structure_for_eval_(t[folds != fold],\n e[folds != fold])\n\n t_tr_max = np.max([t_[1] for t_ in str_train])\n t_ = np.array([t_[1] for t_ in str_test])\n \n clean = (t_<=t_tr_max)\n \n str_test = str_test[t_<=t_tr_max]\n ate = ate[t_<=t_tr_max]\n \n scores_f = scores[fold][clean]\n \n for group in groups:\n \n te_protg = (ate == group)\n tr_protg = (atr == group)\n \n try:\n roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],\n -scores_f[te_protg], [quant])\n brs_m = brier_score(str_train[tr_protg], str_test[te_protg],\n scores_f[te_protg], quant)\n ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],\n -scores_f[te_protg], quant)[0]\n \n except:\n roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],\n -scores_f[te_protg], [quant])\n brs_m = brier_score(str_train, str_test[te_protg],\n scores_f[te_protg], quant)\n ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],\n -scores_f[te_protg], quant)[0]\n \n fprs[group][fold] = roc_m[0][0][1] \n tprs[group][fold] = roc_m[0][0][0] \n ctds[group][fold] = ctd_m\n brss[group][fold] = brs_m[1][0]\n \n roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])\n ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]\n brs_m = brier_score(str_train, str_test, scores_f, quant)\n \n fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]\n ctds['all'][fold] = ctd_m\n brss['all'][fold] = brs_m[1][0]\n \n cols = ['b', 'r', 'g']\n\n roc_auc = {}\n ctds_mean = {}\n brss_mean = {}\n \n j = 0\n\n for group in list(groups) + ['all']:\n\n all_fpr = 
np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))\n\n # The ROC curves are interpolated at these points.\n mean_tprs = []\n for i in set(folds):\n mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))\n\n # Finally the interpolated curves are averaged over to compute AUC.\n mean_tpr = np.mean(mean_tprs, axis=0)\n std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)\n\n fprs[group]['macro'] = all_fpr\n tprs[group]['macro'] = mean_tpr\n tprs_std[group] = std_tpr\n\n roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])\n\n ctds_mean[group] = np.mean([ctds[group][fold] for fold in folds])\n brss_mean[group] = np.mean([brss[group][fold] for fold in folds])\n \n lbl = str(group)\n lbl += ' AUC:' + str(round(roc_auc[group], 3))\n lbl += ' Ctd:'+ str(round(ctds_mean[group], 3))\n lbl += ' BS:'+ str(round(brss_mean[group], 3))\n \n if plot:\n ax.plot(\n all_fpr,\n mean_tpr,\n c=cols[j],\n label=lbl)\n\n ax.fill_between(\n all_fpr,\n mean_tpr - std_tpr,\n mean_tpr + std_tpr,\n color=cols[j],\n alpha=0.25)\n\n j += 1\n \n if plot:\n ax.set_xlabel('False Positive Rate', fontsize=fs)\n ax.set_ylabel('True Positive Rate', fontsize=fs)\n ax.legend(fontsize=fs)\n ax.set_xscale('log')\n\n return roc_auc, ctds_mean, brss_mean", "def plot_ROC_curves(fig, ax, y_all, perf, title=None):\n curves = {'IMPRESS_all': 'royalblue',\n 'IMPRESS_HE_only': 'plum',\n 'IMPRESS_IHC_only': 'pink',\n 'pathologists_eval': 'tomato'}\n \n type_convert = {'IMPRESS_all': 'IMPRESS',\n 'IMPRESS_HE_only': 'IMPRESS (H&E only)',\n 'IMPRESS_IHC_only': 'IMPRESS (IHC only)',\n 'pathologists_eval': 'Pathologists'}\n \n for fgroup in curves.keys():\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 100)\n ax.set_aspect('equal')\n for seed in range(int(y_all[fgroup].shape[1]/3)):\n y_true = y_all[fgroup].loc[:,'y_true'].iloc[:,seed]\n y_pred_proba = y_all[fgroup].loc[:,'y_pred_proba'].iloc[:,seed]\n tpr, fpr, treshold = roc_curve(y_true, 1-y_pred_proba)\n tprs.append(np.interp(mean_fpr, fpr, tpr))\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n ax.plot(fpr, tpr, color=curves[fgroup], linewidth=2, alpha=0.10, label=None)\n \n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n \n ax.plot(mean_fpr, mean_tpr, color=curves[fgroup],\n label=r'%s (AUC = %0.4f $\\pm$ %0.2f)' % \\\n (type_convert[fgroup], perf[fgroup].loc['AUC','mean'], perf[fgroup].loc['AUC','std']),\n linewidth=3.0, alpha=0.80)\n \n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n \n if fgroup == 'IMPRESS_all':\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=r'$\\pm$ 1 standard deviation')\n else:\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=None)\n \n ax.set_xlabel('False positive rate')\n ax.set_ylabel('True positive rate')\n x = [0.0, 1.0]\n plt.plot(x, x, linestyle='dashed', color='red', linewidth=2.0, label='Random')\n plt.legend(fontsize=10, loc='best')\n \n if title is not None:\n fig.suptitle(t=title, fontsize=12)\n return fig", "def one_data_figure_shaded(obs, axobj, color='Blue', facecolor='Blue',\n **kwargs):\n \n x, y, e = obs['wavelength'], obs['spectrum'], obs['unc']\n axobj.fill_between(x, y-e, y+e, facecolor='grey', alpha=0.3)\n axobj.plot(x, y, color = color, linewidth = 0.5,**kwargs)\n\n return axobj", "def plot_roc_curves(labels, probas, name='', ax=None):\n # Setup axis\n if ax is None:\n fig, ax = plt.subplots(figsize=(20, 10))\n\n plot_roc(labels, 
probas, name=name, ax=ax)\n\n # Plot chance\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black', alpha=.8)\n\n # Fill bottom right\n ax.fill_between([0, 1], [0, 1], alpha=0.3, color='black')\n\n # Settings\n ax.set_xlabel('False Positive Rate or (1 - Specifity)', fontsize=15)\n ax.set_ylabel('True Positive Rate or (Sensitivity)', fontsize=15)\n ax.set_title('Receiver Operating Characteristic', weight='bold', fontsize=18)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.legend(loc='lower right')\n\n return ax", "def _rfigure(self, legend=True, fig=None, ax=None):\n if fig is None and ax is None:\n fig, ax = plt.subplots()\n suptitle = True\n elif fig is None:\n fig = ax.get_figure()\n suptitle = False\n elif ax is None:\n ax = fig.gca()\n suptitle = False\n\n ax.grid(True)\n\n line_rstr = None\n line_rrls = None\n line_lstr = None\n line_lrls = None\n line_minima = None\n line_maxima = None\n t = self.timevector\n for axis, trace in zip('xy', ['positionX', 'positionY']):\n s = self.get_data(traces=trace) * 1e6 # m -> µm\n r_str_rls = self.stress_release_pairs(axis=axis, direction='right')\n l_str_rls = self.stress_release_pairs(axis=axis, direction='left')\n rstr = r_str_rls['stress']['idx']\n lstr = l_str_rls['stress']['idx']\n rrls = r_str_rls['release']['idx']\n lrls = l_str_rls['release']['idx']\n\n ax.plot(t, s, lw=0.1, ms=2, color='k', alpha=1.0)\n\n # line_rstr = None\n # line_rrls = None\n # line_lstr = None\n # line_lrls = None\n for rstr, rrls in zip(rstr, rrls):\n line_rstr, = ax.plot(t[rstr], s[rstr], lw=0.4, ms=2, color='m')\n line_rrls, = ax.plot(t[rrls], s[rrls], lw=0.4, ms=2, color='c')\n for lstr, lrls in zip(lstr, lrls):\n line_lstr, = ax.plot(t[lstr], s[lstr], lw=0.4, ms=2, color='g')\n line_lrls, = ax.plot(t[lrls], s[lrls], lw=0.4, ms=2, color='y')\n\n # line_minima = None\n # line_maxima = None\n for segment in self._sf.sections[axis]:\n minima = self.undecimate_and_limit(segment['minima'])\n maxima = self.undecimate_and_limit(segment['maxima'])\n line_minima, = ax.plot(t[minima], s[minima], '.', ms=5,\n color='b')\n line_maxima, = ax.plot(t[maxima], s[maxima], '.', ms=5,\n color='r')\n\n line_excited_x = None\n for x_c in (self.undecimate_and_limit(self._sf.excited['x'])\n / self.resolution):\n line_excited_x = ax.hlines(0.0, x_c[0], x_c[1], alpha=1,\n colors='b', linestyle='solid', lw=1)\n # ax.plot(x_c[0], 0.5, '.k', alpha=1, ms=3)\n # ax.plot(x_c[1], 0.5, '.k', alpha=1, ms=3)\n ax.vlines(x_c[0], -0.01, 0.01, alpha=1, colors='b',\n linestyle='solid', lw=1)\n ax.vlines(x_c[1], -0.01, 0.01, alpha=1, colors='b',\n linestyle='solid', lw=1)\n\n line_excited_y = None\n for y_c in (self.undecimate_and_limit(self._sf.excited['y'])\n / self.resolution):\n line_excited_y = ax.hlines(0.0, y_c[0], y_c[1], alpha=1,\n colors='r', linestyle='solid', lw=1)\n # ax.plot(y_c[0], -0.5, '.k', alpha=1, ms=3)\n # ax.plot(y_c[1], -0.5, '.k', alpha=1, ms=3)\n ax.vlines(y_c[0], -0.01, 0.01, alpha=1, colors='r',\n linestyle='solid', lw=1)\n ax.vlines(y_c[1], -0.01, 0.01, alpha=1, colors='r',\n linestyle='solid', lw=1)\n\n ax.set_xlim((t[0], t[-1]))\n\n ax.set_xlabel(\"Time (s)\")\n ax.set_ylabel(\"Signal positionX and Y (µm)\")\n if suptitle:\n fig.suptitle(\"Automatically detected excited axis, minima, \"\n \"maxima, and sections.\")\n\n if legend:\n if line_minima is not None:\n line_minima.set_label('minima')\n if line_maxima is not None:\n line_maxima.set_label('maxima')\n if line_rstr is not None:\n line_rstr.set_label('rightstress')\n if line_rrls is not None:\n 
line_rrls.set_label('rightrelease')\n if line_lstr is not None:\n line_lstr.set_label('leftstress')\n if line_lrls is not None:\n line_lrls.set_label('leftrelease')\n if line_excited_x is not None:\n line_excited_x.set_label('excited x')\n if line_excited_y is not None:\n line_excited_y.set_label('excited y')\n\n ax.legend(loc='upper right')\n\n return fig", "def _plot_ribbon_using_bezier(ax, zorder, points1, points2, color1=\"gray\",\n color2=\"gray\", lw=1):\n cc = ColorConverter()\n color1 = np.array(cc.to_rgba(color1))\n color2 = np.array(cc.to_rgba(color2))\n tRange = np.linspace(0, 1, 100)\n xpointsList = []\n ypointsList = []\n for points in [points1, points2]:\n points = np.array(points)\n p1 = points[0]\n p2 = points[1]\n p3 = points[2]\n p4 = points[3]\n allPoints = (p1[:, np.newaxis] * (1 - tRange) ** 3 + p2[:, np.newaxis]\n * (3 * (1 - tRange) ** 2 * tRange) + p3[:, np.newaxis] *\n (3 * (1 - tRange) * tRange ** 2) + p4[:, np.newaxis] *\n tRange ** 3)\n xpoints = allPoints[0]\n xpointsList.append(xpoints)\n ypoints = allPoints[1]\n ypointsList.append(ypoints)\n ax.plot(xpoints, ypoints, \"0.85\", lw=lw, zorder=zorder + 0.5)\n xpoints = xpointsList[0]\n if (mpl.colors.colorConverter.to_rgba_array(color1) ==\n mpl.colors.colorConverter.to_rgba_array(color2)).all():\n ax.fill_between(xpoints, ypointsList[0], ypointsList[1], lw=lw,\n facecolor=color1, edgecolor=color1, zorder=zorder)\n else:\n for i in range(len(tRange) - 1):\n #mean = (tRange[i]+tRange[i+1])*0.5\n xnow = np.mean(xpoints[i:i + 2])\n norm_mean = (xnow - xpoints[0]) / (xpoints[-1] - xpoints[0])\n color = color1 * (1 - norm_mean) + color2 * norm_mean\n ax.fill_between(xpoints[i:i + 2], ypointsList[0][i:i + 2],\n ypointsList[1][i:i + 2], lw=lw, facecolor=color,\n edgecolor=color, zorder=zorder)", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif 
diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def plot_model_curves(class_name, model, range_metrics, ax):\n def plot_axis(ax, data, color):\n \"\"\"\n Plot data on axis in certain color\n \"\"\"\n x_indices = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n ax.scatter(x_indices, data, color=color, s=4)\n ax.plot(x_indices, data, color=color, linewidth=2)\n ax.set_yticks([]) # same for y ticks\n ax.set_ylim([0, 1])\n # Get balanced purities\n preds = np.concatenate(model.results)\n if model.name == \"Binary Classifiers\":\n purities = get_binary_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n else:\n purities = get_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n\n # Get completenesses\n comps = get_completeness_ranges(model.class_counts, range_metrics, class_name)\n\n print(\"\\n\\n Model: \" + str(model.name) + \", class: \" + class_name)\n print(\"Completeness\")\n print(comps)\n print(\"Purity\")\n print(purities)\n\n plot_axis(ax, comps, C_BAR_COLOR)\n ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\n ax2.set_ylim([0, 1])\n plot_axis(ax2, purities, P_BAR_COLOR)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n return ax2", "def plot_curve(self, fig, ax, linewidth=1.5, linestyle='-', color='black', u1=0.00, u2=1.00):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n u = np.linspace(u1, u2, 501)\n X = np.real(self.get_value(u))\n line, = ax.plot(u, X[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n u = np.linspace(u1, u2, 501)\n X, Y = np.real(self.get_value(u))\n line, = ax.plot(X, Y)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n u = np.linspace(u1, u2, 501)\n X, Y, Z = np.real(self.get_value(u))\n line, = ax.plot(X, Y, Z)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n return fig, ax", "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, 
parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, 
stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "def plot_roc_curve(tprs, aucs, tag=''):\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_fpr = np.linspace(0, 1, 100)\n\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\n ax.legend(loc=\"lower right\")\n plt.tight_layout()\n plt.savefig(f'roc_{tag}.png')\n plt.show()", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n 
plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot_curves():\n lm = np.arange(0, 1.8, .01)\n vm = np.arange(-1.2, 1.2, .01)\n lt = np.arange(0, 1.07, .01)\n plt.subplot(2,1,1)\n plt.plot(lm, force_length_muscle(lm), 'r')\n plt.plot(lm, force_length_parallel(lm), 'g')\n plt.plot(lt, force_length_tendon(lt), 'b')\n plt.legend(('CE', 'PE', 'SE'))\n plt.xlabel('Normalized length')\n plt.ylabel('Force scale factor')\n plt.subplot(2, 1, 2)\n plt.plot(vm, force_velocity_muscle(vm), 'k')\n plt.xlabel('Normalized muscle velocity')\n plt.ylabel('Force scale factor')\n plt.tight_layout()\n plt.show()", "def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)", "def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in 
range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def plot_forest(self):\n ax, = az.plot_forest(self.ifd_, var_names=[\"avg\", \"a_coef\", \"b_vals_coef\", \"b_mask_coef\", \"c_vals_coef\", \"c_mask_coef\"])\n ax.axvline(0, linestyle=':', color='black')\n # return ax", "def plot_along(a, title=''):\n f, ax = plt.subplots(2, figsize=(16, 16), dpi= 80, )#wspace=0, hspace=0)\n\n x = a['xo'].values\n pc = a['pc'].values\n pn = a['pn'].values\n crl = a['crl'].values\n chisqr = a['chisqr'].values\n\n for i in ax[1:2]:\n i.grid()\n i.xaxis.label.set_size(15)\n i.yaxis.label.set_size(15)\n i.tick_params(labelsize=15)\n i.title.set_size(20)\n\n ax[0].plot(x, pc, 'k', lw=3, label='$P_c$')\n ax[0].plot(x, pn, '.6', lw=3, label='$P_n$')\n ax[0].fill_between(x, pc, pn, where=pc >= pn, alpha=.2, label='Dominantly Specular')\n ax[0].fill_between(x, pc, pn, where=pc <= pn, alpha=.2, label='Dominatly Diffuse')\n ax[0].set_title('RSR-derived Coherent and Incoherent Energies', fontweight=\"bold\", fontsize=20)\n ax[0].set_ylabel('$[dB]$')\n ax[0].set_xlim(0, x.max())\n ax[0].legend(loc=3, ncol=2, fontsize='large')\n\n ax_chisqr = ax[1].twinx()\n ax_chisqr.plot(x, chisqr, '.6', lw=3)\n ax_chisqr.set_ylabel('Chi-square', color='.6')\n ax_chisqr.yaxis.label.set_size(15)\n ax_chisqr.tick_params(labelsize=15)\n\n ax[1].plot(x, crl, 'k', lw=3)\n ax[1].set_title('Quality Metrics', fontweight=\"bold\", fontsize=20)\n ax[1].set_ylabel('Correlation Coefficient')\n ax[1].set_xlim(0, x.max())\n ax[1].set_ylim(0, 1.1)\n ax[1].legend(loc=3, ncol=2, fontsize='large')\n ax[1].set_xlabel('Bin #')", "def plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n group,\n quant,\n strat='quantile',\n adj='IPCW', \n plot=True):\n\n allscores = np.ones_like(t).astype('float')\n\n for fold in set(folds):\n allscores[folds == fold] = scores[fold]\n\n scores = allscores\n\n b_fc = (0, 0, 1, .4)\n r_fc = (1, 0, 0, .2)\n\n b_ec = (0, 0, 1, .8)\n r_ec = (1, 0, 0, .8)\n\n n_bins = 20\n\n hatch = '//'\n\n fs = 16\n\n prob_true_n, _, outbins, ece = calibration_curve(\n scores,\n e,\n t,\n a,\n group,\n quant,\n typ=adj,\n ret_bins=True,\n strat=strat,\n n_bins=n_bins)\n \n for d in range(len(prob_true_n)):\n\n binsize = outbins[d + 1] - outbins[d]\n binloc = (outbins[d + 1] + outbins[d]) / 2\n\n gap = (prob_true_n[d] - binloc)\n\n if gap < 0:\n bottom = prob_true_n[d]\n else:\n bottom = prob_true_n[d] - abs(gap)\n\n if d == len(prob_true_n) - 1:\n lbl1 = 'Score'\n lbl2 = 'Gap'\n else:\n lbl1 = None\n lbl2 = None\n \n if plot:\n ax.bar(\n binloc,\n prob_true_n[d],\n width=binsize,\n facecolor=b_fc,\n edgecolor=b_ec,\n linewidth=2.5,\n label=lbl1)\n ax.bar(\n binloc,\n abs(gap),\n bottom=bottom,\n width=binsize,\n facecolor=r_fc,\n edgecolor=r_ec,\n linewidth=2.5,\n hatch=hatch,\n label=lbl2)\n\n d += 1\n \n if plot:\n \n ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)\n\n ax.set_xlabel('Predicted 
Score', fontsize=fs)\n ax.set_ylabel('True Score', fontsize=fs)\n\n ax.legend(fontsize=fs)\n ax.set_title(str(group), fontsize=fs)\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n\n ax.grid(ls=':', lw=2, zorder=-100, color='grey')\n ax.set_axisbelow(True)\n\n ax.text(\n x=0.030,\n y=.7,\n s='ECE=' + str(round(ece, 3)),\n size=fs,\n bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))\n\n return ece", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)", "def plot_control_points(self, fig, ax, linewidth=1.25, linestyle='-.', color='red', markersize=5, markerstyle='o'):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n Px = np.real(self.P)\n u = np.linspace(0, 1, Px.size)\n line, = ax.plot(u, Px[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n Px, Py = np.real(self.P)\n line, = ax.plot(Px, Py)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n Px, Py, Pz = np.real(self.P)\n line, = ax.plot(Px, Py, Pz)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n 
line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax", "def plot(self, ax, graph=None, graph_i=None, type_plot='', ignoreNext=0, boxplot=None, violinplot=None, violinplotkwargs={}):\n from grapa.graph import Graph\n handle = None\n # check default arguments\n if boxplot is None:\n boxplot = {'y': [], 'positions': [], 'labels': [], 'color': [],\n 'i': 0}\n if violinplot is None:\n violinplot = {'y': [], 'positions': [], 'labels': [], 'color': []}\n if graph is None:\n graph_i = None\n else:\n if graph[graph_i] != self:\n graph_i = None\n if graph_i is None:\n for c in range(len(graph)):\n if graph[c] == self:\n graph_i = c\n break\n if graph_i is None:\n graph = None # self was not found in graph\n print('Warning Curve.plot: Curve not found in provided Graph')\n\n # retrieve basic information\n alter = graph._getAlter() if graph is not None else ['', '']\n attr = self.getAttributes()\n linespec = self.attr('linespec')\n # construct dict of keywords based on curves attributes, in a very\n # restrictive way\n # some attributes are commands for plotting, some are just related to\n # the sample, and no obvious way to discriminate between the 2\n fmt = {}\n for key in attr:\n if not isinstance(key, str):\n print(type(key), key, attr[key])\n if ((not isinstance(attr[key], str) or attr[key] != '')\n and key in Graph.dataInfoKeysGraph\n and key not in ['plot', 'linespec', 'type', 'ax_twinx',\n 'ax_twiny', 'offset', 'muloffset',\n 'labelhide', 'colorbar', 'xerr', 'yerr']):\n fmt[key] = attr[key]\n # do not plot curve if was asked not to display it.\n if 'linestyle' in fmt and fmt['linestyle'] in Curve.LINESTYLEHIDE:\n return None, ignoreNext\n # some renaming of kewords, etc\n if 'legend' in fmt:\n fmt['label'] = fmt['legend']\n del fmt['legend']\n if 'cmap' in fmt and not isinstance(fmt['cmap'], str):\n # convert Colorscale into matplotlib cmap\n from grapa.colorscale import Colorscale\n fmt['cmap'] = Colorscale(fmt['cmap']).cmap()\n if 'vminmax' in fmt:\n if isinstance(fmt['vminmax'], list) and len(fmt['vminmax']) > 1:\n if (fmt['vminmax'][0] != '' and not np.isnan(fmt['vminmax'][0])\n and not np.isinf(fmt['vminmax'][0])):\n fmt.update({'vmin': fmt['vminmax'][0]})\n if (fmt['vminmax'][1] != '' and not np.isnan(fmt['vminmax'][1])\n and not np.isinf(fmt['vminmax'][1])):\n fmt.update({'vmax': fmt['vminmax'][1]})\n del fmt['vminmax']\n\n # start plotting\n # retrieve data after transform, including of offset and muloffset\n x = self.x_offsets(alter=alter[0])\n y = self.y_offsets(alter=alter[1])\n type_graph = self.attr('type', 'plot')\n if type_plot.endswith(' norm.'):\n type_graph = type_plot[:-6]\n y = y / max(y)\n\n # add keyword arguments which are in the plot method prototypes\n try:\n sig = inspect.signature(getattr(ax, type_graph))\n for key in sig.parameters:\n if key in attr and key not in fmt:\n fmt.update({key: attr[key]})\n except AttributeError:\n print('Curve.plot: desired plotting method not found ('+type_graph\n + '). Going for default.')\n # for xample 'errorbar_yerr' after suppression of previous Curve\n # 'errorbar'. 
Will be 'plot' anyway.\n pass\n except Exception as e:\n print('Exception in Curve.plot while identifying keyword',\n 'arguments:')\n print(type(e), e)\n\n if 'labelhide' in attr and attr['labelhide']:\n if 'label' in fmt:\n del fmt['label']\n\n # No support for the following methods (either 2D data, or complicated\n # to implement):\n # hlines, vlines, broken_barh, contour, contourf, polar,\n # pcolor, pcolormesh, streamplot, tricontour, tricontourf,\n # tripcolor\n # Partial support for:\n # imgshow\n attrIgnore = ['label', 'plot', 'linespec', 'type', 'ax_twinx',\n 'ax_twiny', 'offset', 'muloffset', 'labelhide',\n 'colorbar']\n # \"simple\" plotting methods, with prototype similar to plot()\n if type_graph in ['semilogx', 'semilogy', 'loglog', 'plot_date',\n 'stem', 'step', 'triplot']:\n handle = getattr(ax, type_graph)(x, y, linespec, **fmt)\n elif type_graph in ['fill']:\n if self.attr('fill_padto0', False):\n handle = ax.fill([x[0]]+list(x)+[x[-1]], [0]+list(y)+[0], linespec, **fmt)\n else:\n handle = ax.fill(x, y, linespec, **fmt)\n # plotting methods not accepting formatting string as 3rd argument\n elif type_graph in ['bar', 'barbs', 'barh', 'cohere', 'csd',\n 'fill_between', 'fill_betweenx', 'hexbin',\n 'hist2d', 'quiver', 'xcorr']:\n handle = getattr(ax, type_graph)(x, y, **fmt)\n # plotting of single vector data\n elif type_graph in ['acorr', 'angle_spectrum', 'eventplot', 'hist',\n 'magnitude_spectrum', 'phase_spectrum', 'pie',\n 'psd', 'specgram']:\n # careful with eventplot, the Curve data are modified\n handle = getattr(ax, type_graph)(y, **fmt)\n # a more peculiar plotting\n elif type_graph in ['spy']:\n handle = getattr(ax, type_graph)([x, y], **fmt)\n elif type_graph == 'stackplot':\n # look for next Curves with type == 'stackplot', and same x\n nexty = []\n fmt['labels'], fmt['colors'] = [''], ['']\n if 'label' in fmt:\n fmt['labels'] = ['' if self.attr('labelhide') else fmt['label']]\n del fmt['label']\n if 'color' in fmt:\n fmt['colors'] = [fmt['color']]\n del fmt['color']\n attrIgnore.append('color')\n if graph is not None:\n for j in range(graph_i+1, len(graph)):\n if graph[j].attr('type') == type_graph and np.array_equal(x, graph[j].x_offsets(alter=alter[0])):\n ignoreNext += 1\n if not graph[j].isHidden():\n nexty.append(graph[j].y_offsets(alter=alter[1]))\n lbl = graph[j].attr('label')\n fmt['labels'].append('' if graph[j].attr('labelhide') else lbl)\n fmt['colors'].append(graph[j].attr('color'))\n continue\n else:\n break\n if np.all([(c == '') for c in fmt['colors']]):\n del fmt['colors']\n handle = getattr(ax, type_graph)(x, y, *nexty, **fmt)\n elif type_graph == 'errorbar':\n # look for next Curves, maybe xerr/yerr was provided\n if 'xerr' in attr:\n fmt.update({'yerr': attr['xerr']})\n if 'yerr' in attr:\n fmt.update({'yerr': attr['yerr']})\n if graph is not None:\n for j in range(graph_i+1, min(graph_i+3, len(graph))):\n if len(graph[j].y()) == len(y):\n typenext = graph[j].attr('type')\n if typenext not in ['errorbar_xerr', 'errorbar_yerr']:\n break\n if typenext == 'errorbar_xerr':\n fmt.update({'xerr': graph[j].y_offsets()})\n ignoreNext += 1\n continue\n if typenext == 'errorbar_yerr':\n fmt.update({'yerr': graph[j].y_offsets()})\n ignoreNext += 1\n continue\n break\n handle = ax.errorbar(x, y, fmt=linespec, **fmt)\n elif type_graph == 'scatter':\n convert = {'markersize': 's', 'markeredgewidth': 'linewidths'}\n for key in convert:\n if key in fmt:\n fmt.update({convert[key]: fmt[key]})\n del fmt[key]\n try:\n if graph is not None:\n for j in 
range(graph_i+1, min(graph_i+3, len(graph))):\n typenext = graph[j].attr('type')\n if typenext not in ['scatter_c', 'scatter_s']:\n break\n if 's' not in fmt and typenext == 'scatter_s':\n fmt.update({'s': graph[j].y_offsets(alter=alter[1])})\n ignoreNext += 1\n continue\n elif 'c' not in fmt and (typenext == 'scatter_c' or np.array_equal(x, graph[j].x_offsets(alter=alter[0]))):\n fmt.update({'c': graph[j].y_offsets(alter=alter[1])})\n ignoreNext += 1\n if 'color' in fmt:\n # there cannot be both c and color keywords\n del fmt['color']\n continue\n else:\n break\n handle = ax.scatter(x, y, **fmt)\n except Exception as e:\n print('ERROR! Exception occured in Curve.plot function during',\n 'scatter.')\n print(type(e), e)\n elif type_graph == 'boxplot':\n if len(y) > 0 and not np.isnan(y).all():\n bxpltpos = self.attr('boxplot_position', None)\n boxplot['y'].append(y[~np.isnan(y)])\n boxplot['positions'].append(boxplot['i'] if bxpltpos is None else bxpltpos)\n boxplot['labels'].append(fmt['label'] if 'label' in fmt else '')\n boxplot['color'].append(fmt['color'] if 'color' in fmt else '')\n for key in ['widths', 'notch', 'vert']:\n if self.attr(key, None) is not None:\n boxplot.update({key: self.attr(key)})\n boxplot['i'] += 1\n elif type_graph == 'violinplot':\n if len(y) > 0 and not np.isnan(y).all():\n bxpltpos = self.attr('boxplot_position', None)\n violinplot['y'].append(y[~np.isnan(y)])\n violinplot['positions'].append(boxplot['i'] if bxpltpos is None else bxpltpos)\n violinplot['labels'].append(fmt['label'] if 'label' in fmt else '')\n violinplot['color'].append(fmt['color'] if 'color' in fmt else '')\n if 'showmeans' in attr:\n violinplotkwargs.update({'showmeans': attr['showmeans']})\n if 'showmedians' in attr:\n violinplotkwargs.update({'showmedians': attr['showmedians']})\n if 'showextrema' in attr:\n violinplotkwargs.update({'showextrema': attr['showextrema']})\n boxplot['i'] += 1\n elif type_graph in ['imshow', 'contour', 'contourf']:\n from grapa.curve_image import Curve_Image\n img, ignoreNext, X, Y = Curve_Image.getImageData(self, graph, graph_i, alter, ignoreNext)\n if 'label' in fmt:\n del fmt['label']\n if type_graph in ['contour', 'contourf']:\n for key in ['corner_mask', 'colors', 'alpha', 'cmap', 'norm',\n 'vmin', 'vmax', 'levels', 'origin', 'extent',\n 'locator', 'extend', 'xunits', 'yunits',\n 'antialiased', 'nchunk', 'linewidths',\n 'linestyles', 'hatches']:\n if key in attr and key not in fmt:\n fmt.update({key: attr[key]})\n # TODO: remove linewidths, linestyles for contourf, hatches for\n # contour\n args = [img]\n if (X is not None and Y is not None\n and type_graph in ['contour', 'contourf']):\n args = [X, Y] + args\n try:\n handle = getattr(ax, type_graph)(*args, **fmt)\n except Exception as e:\n print('Curve plot', type_graph, 'Exception')\n print(type(e), e)\n else:\n # default is plot (lin-lin) # also valid if no information is\n # stored, aka returned ''\n handle = ax.plot(x, y, linespec, **fmt)\n\n handles = handle if isinstance(handle, list) else [handle]\n for key in attr:\n if key not in fmt and key not in attrIgnore:\n for h in handles:\n if hasattr(h, 'set_'+key):\n try:\n getattr(h, 'set_'+key)(attr[key])\n except Exception as e:\n print('GraphIO Exception during plot kwargs',\n 'adjustment for key', key, ':', type(e))\n print(e)\n\n return handle, ignoreNext", "def plot(self, ax=None, name=None, **kwargs):\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig, ax = plt.subplots()\n\n name = self.estimator_name if name is None else name\n\n 
line_kwargs = {\n 'label': \"{} (AUC = {:0.2f})\".format(name, self.roc_auc), \n 'lw':0.25\n\n }\n line_kwargs.update(**kwargs)\n\n self.line_ = ax.plot(self.fpr, self.tpr, **line_kwargs)[0]\n ax.set_xlabel(\"False Positive Rate\")\n ax.set_ylabel(\"True Positive Rate\")\n ax.legend(loc='lower right')\n\n self.ax_ = ax\n self.figure_ = ax.figure\n return self" ]
[ "0.7304228", "0.6550803", "0.6549476", "0.63300985", "0.631119", "0.6244181", "0.614794", "0.61184627", "0.611656", "0.6109644", "0.60801786", "0.6004628", "0.59819883", "0.595841", "0.5869724", "0.58628213", "0.5857794", "0.5838128", "0.5826333", "0.58193934", "0.5785737", "0.5783892", "0.5718445", "0.57179666", "0.57162356", "0.5686101", "0.5677497", "0.56677884", "0.5615513", "0.5614723" ]
0.6976897
1
Main routine for plotting a single roccurve
def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):
    # Get a default ax if none is given
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(8,8))
        ax = fig.gca()
    # Plot the base line
    ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
    # Plot the single roccurve
    line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)
    line.set_label(bkgs[0].get_category())
    # Plot settings
    ax.set_xlim(0.0, 1.05)
    ax.set_ylim(0.0, 1.05)
    ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
    ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
    ax.legend(fontsize=DEFAULT_FONTSIZE)
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def roc_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import roc_curve\n if train==True:\n ypredTrain = model.predict(X_train)\n fpr, tpr, thresholds = roc_curve(y_train, ypredTrain)\n plt.plot(fpr, tpr, linewidth=3, label=None, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel('False Positive Rate', size=12)\n plt.ylabel('True Positive Rate', size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"ROC Curve: Sensitivity/Specificity Trade-off\\n\\n(Train)\\n\", size=14)\n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n fpr, tpr, thresholds = roc_curve(y_test, ypredTest)\n plt.plot(fpr, tpr, linewidth=3, label=None, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel('False Positive Rate', size=12)\n plt.ylabel('True Positive Rate', size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title('ROC Curve: Sensitivity/Specificity Trade-off\\n\\n(Test)\\n', size=14)\n plt.show()", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plot_roc_curve(x_data, labels, net, plotfile,\n title=''):\n\n # Have the net predict, then split the scores by ground truth\n scores = net.predict(x_data)\n\n distfile = PLOTDIR / plotfile.replace('roc', 'dist')\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n df = pd.DataFrame({'Condition': ['Positive' if int(i) == 1 else 'Negative'\n for i in labels[0, :]],\n 'Score': scores[0, :]})\n sns.violinplot(x='Condition', y='Score', data=df, ax=ax)\n ax.set_title('{} Dist for Rap1 Identification'.format(title))\n\n fig.savefig(str(distfile))\n\n plt.close()\n\n fp_rate, tp_rate = calc_roc(scores[labels], scores[~labels])\n\n # Make the plot\n plotfile = PLOTDIR / plotfile\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n ax.plot(fp_rate, tp_rate, '-o', linewidth=3)\n\n # Plot the line for perfect confusion\n ax.plot([0, 1], [0, 1], '--', linewidth=3)\n\n ax.set_title('{} ROC for Rap1 Identification'.format(title))\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_xlim([-0.01, 1.01])\n ax.set_ylim([-0.01, 1.01])\n\n fig.savefig(str(plotfile))\n plt.close()", "def roc2(fpr, tpr, roc_auc):\n plt.figure()\n plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n 
plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot():\n pass", "def roc_plot(label, fpr, tpr, roc_auc):\n plt.figure()\n for i in range(len(label)):\n plt.plot(fpr[i], tpr[i], label=label[i] + ' AUC = %0.2f' % roc_auc[i], alpha=0.75)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([-0.01, 1.01])\n plt.ylim([-0.01, 1.01])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc='lower right')\n plt.show()", "def plotCaliCurve(constants, data, outName):\n x=np.linspace(min(data[:,0]),max(data[:,0]),1000)\n plt.figure()\n plt.rcParams.update({'font.size' : 16})\n plt.scatter(data[:,0],data[:,1])\n plt.plot(x,LangmuirCurve(x,constants[0],constants[1],constants[2],constants[3]))\n #plt.xlabel(\"MG Concentration (nM)\")\n #plt.ylabel(\"Relative SHS signal (Arb. 
Units)\")\n plt.savefig(outName + \"_cali_model_plot.png\")\n plt.show()", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def plotCurves(self, dataByModel):\n prFigure = pyplot.figure()\n self.configChart()\n prAx = prFigure.add_subplot(111)\n prAx.set_xlabel('Recall')\n prAx.set_ylabel('Precision')\n prAx.set_title('PR Curve')\n prAx.grid(True)\n\n rocFigure = pyplot.figure()\n self.configChart()\n rocAx = rocFigure.add_subplot(111)\n rocAx.set_xlabel('Fallout / FPR')\n rocAx.set_ylabel('Recall')\n rocAx.set_title('ROC Curve')\n rocAx.grid(True)\n\n corrFigure = pyplot.figure()\n self.configChart()\n corrAx = corrFigure.add_subplot(111)\n corrAx.set_xlabel('predict score')\n corrAx.set_ylabel('real score')\n corrAx.set_title('Correlation Curve')\n corrAx.grid(True)\n\n precisionFigure = pyplot.figure()\n self.configChart()\n precisionAx = precisionFigure.add_subplot(111)\n precisionAx.set_xlabel('score')\n precisionAx.set_ylabel('Precision')\n precisionAx.set_title('Threshold score vs precision')\n precisionAx.grid(True)\n\n recallFigure = pyplot.figure()\n self.configChart()\n recallAx = recallFigure.add_subplot(111)\n recallAx.set_xlabel('score')\n recallAx.set_ylabel('Recall')\n recallAx.set_title('Threshold score vs recall')\n recallAx.grid(True)\n\n falloutFigure = pyplot.figure()\n self.configChart()\n falloutAx = falloutFigure.add_subplot(111)\n falloutAx.set_xlabel('score')\n falloutAx.set_ylabel('Fallout (False Positive Rate)')\n falloutAx.set_title('Threshold score vs fallout')\n falloutAx.grid(True)\n\n for (model, data) in list(dataByModel.items()):\n (recalls, precisions) = list(zip(*(data['PR'])))\n prAx.plot(recalls, precisions, marker='o', linestyle='--', label=model)\n\n (fallouts, recalls) = list(zip(*(data['ROC'])))\n rocAx.plot(fallouts, recalls, marker='o', linestyle='--', label=model)\n\n (pCtrs, eCtrs) = list(zip(*(data['CORR'])))\n corrAx.plot(pCtrs, eCtrs, label=model)\n\n (score, recall, precision, fallout) = list(zip(*(data['cutoff'])))\n\n recallAx.plot(score, recall, label=model + '_recall')\n precisionAx.plot(score, precision, label=model + '_precision')\n falloutAx.plot(score, fallout, label=model + '_fallout')\n\n # saving figures\n ensure_dir(self.output_dir)\n prAx.legend(loc='upper right', shadow=True)\n prFigure.savefig('%s/pr_curve.png' % self.output_dir)\n\n rocAx.legend(loc='lower right', shadow=True)\n rocFigure.savefig('%s/roc_curve.png' % self.output_dir)\n\n corrAx.legend(loc='upper left', shadow=True)\n corrFigure.savefig('%s/corr_curve.png' % self.output_dir)\n\n precisionAx.legend(loc='upper left', shadow=True)\n precisionFigure.savefig('%s/precision.png' % self.output_dir)\n\n recallAx.legend(loc='lower left', shadow=True)\n recallFigure.savefig('%s/recall.png' % self.output_dir)\n\n falloutAx.legend(loc='upper right', shadow=True)\n falloutFigure.savefig('%s/fallout.png' % self.output_dir)\n\n pyplot.close()\n pngs = 
'{result}/pr_curve.png {result}/roc_curve.png {result}/corr_curve.png {result}/precision.png {result}/recall.png {result}/fallout.png'.format(result=self.output_dir)\n print('png: ', pngs)", "def pr_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import precision_recall_curve\n if train==True:\n ypredTrain = model.predict(X_train) \n precisions, recalls, thresholds = precision_recall_curve(y_train, ypredTrain)\n plt.plot(precisions, recalls, linewidth=3, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Train)\\n\", size=14) \n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n precisions, recalls, thresholds = precision_recall_curve(y_test, ypredTest)\n plt.plot(precisions, recalls, linewidth=3, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Test)\\n\", size=14)\n plt.show()", "def rocs(test_set_y_org,test_set_y_pred_prob,methods,linestyles,classes_unique,plot_curve=False,filename=\"./fig_roc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import roc_curve\n #from sklearn.metrics import auc\n from sklearn.metrics import roc_auc_score\n from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n\n num_methods=len(methods)\n roc_aucs=[0]*num_methods\n names=[None]*num_methods\n for m in range(num_methods):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for c in range(n_classes):\n fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])\n roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])\n\n # Compute macro-average ROC curve and AUROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for c in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[c], tpr[c])\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Compute micro-average PRC curve and PRC areas\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob[m].ravel())\n roc_auc[\"macro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"macro\") # micro macro, weighted, or samples\n roc_auc[\"micro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m],average=\"micro\") # micro macro, weighted, or samples\n roc_auc[\"weighted\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"weighted\") # micro macro, weighted, or samples\n roc_auc[\"samples\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n if m==0:\n 
fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.plot([0, 1], [0, 1], 'k--')\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(fpr[\"macro\"], tpr[\"macro\"], linestyle=linestyles[m],linewidth=1,color=colors[n_classes],label='{0}: macro-avg ROC (area={1:0.4f})'.format(methods[m], roc_auc[\"macro\"]))\n\n for c in range(n_classes):\n if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(fpr[c], tpr[c],linestyle=linestyles[m],linewidth=1,color=colors[c],label='{0}: ROC of {1} (area={2:0.4f})'.format(methods[m], classes_unique[c], roc_auc[c]))\n\n # add some text for labels, title and axes ticks\n if m==num_methods-1:\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"True Positive Rate\",fontsize=12)\n ax.set_xlabel(\"False Positive Rate\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower right\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n roc_auc_list=[roc_auc[c] for c in range(n_classes)]\n roc_auc_list.extend([roc_auc[\"macro\"],roc_auc[\"micro\"],roc_auc[\"weighted\"],roc_auc[\"samples\"]])\n roc_auc=np.array(roc_auc_list)\n name=[methods[m]+\"_AUROC_\" + c for c in classes_unique]\n name.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n name=np.array(name)\n\n roc_aucs[m]=roc_auc\n names[m]=name\n \n return roc_aucs,names", "def plot_roc(X,y,test_preds,fname=\"res/roc.png\"):\n\t#Retrieve multiple fpr and tpr values for different thresholds\n\tfpr, tpr, thresholds = roc_curve(y,test_preds)\n\tplt.plot(fpr, tpr)\n\tplt.title(auc(fpr, tpr))\n\tplt.savefig(fname, bbox_inches='tight')\n\tplt.close()", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n # plt.show()\n return buf", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def plot_roc_distributions(self, model_str, resampling_number, roc_curve_steps, roc_plot_path):\n sampling_types = ['Normal', 'Oversampling', 'Undersampling']\n\n PLOT_MARGIN = 0.05\n plt.rcParams[\"figure.figsize\"] = (16, 9)\n 
plt.subplots_adjust(wspace=0.2, hspace=0.4)\n sub_plot_index = 1\n\n for sampling_type in sampling_types:\n mean_fpr, mean_tpr, mean_threshold, mean_auc, std_auc = self._compute_mean_auc_data(sampling_type, model_str, resampling_number, roc_curve_steps)\n\n plt.subplot(int('22' + str(sub_plot_index)))\n\n sub_plot_index += 1\n\n plt.plot(mean_fpr, mean_tpr, color='g', label='AUC:{0}, STD:{1}'.format(round(mean_auc, 2), round(std_auc, 2)))\n plt.plot(mean_fpr, mean_threshold, linestyle='--', lw=2, color='b', label='Thresholds')\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance')\n\n plt.xlim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.ylim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(sampling_type + ' ROC Distribution')\n plt.legend(loc=\"lower right\")\n\n plt.savefig(roc_plot_path)\n plt.clf()", "def plot_roc_curve(y_true, y_pred_prob, show_threshold=False, **params):\n\n figure = plt.figure(figsize=params.get('figsize', (17, 10)))\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, label='ROC curve (area = %0.5f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xticks(np.arange(0.0, 1.1, step=0.1))\n plt.yticks(np.arange(0.0, 1.1, step=0.1))\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n if show_threshold:\n ax2 = plt.gca().twinx()\n ax2.plot(fpr, thresholds, markeredgecolor='r',\n linestyle='dashed', color='r')\n ax2.set_ylabel('Threshold', color='r')\n ax2.set_ylim([0.0, 1.0])\n ax2.set_xlim([0.0, 1.0])\n\n plt.show()\n\n return figure, roc_auc", "def _roc_plot_single(metrics, save_name):\n plt.figure()\n plt.plot([0, 1], [0, 1], \"k--\")\n plt.plot(metrics[\"fpr\"], metrics[\"tpr\"], \"r\", linewidth=2)\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.grid()\n plt.xlabel(\"True Positive Rate\")\n plt.ylabel(\"False Positive Rate\")\n plt.tight_layout()\n plt.savefig(save_name)", "def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()", "def rplot(Qz, R, format):\n # plt.hold(True)\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n rr = xs.real\n if (rr>1e-8).any():\n plt.plot(Qz, rr, format, label=name + 'r')\n plt.legend()\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n ri = xs.imag\n if (ri>1e-8).any():\n plt.plot(Qz, ri, format, label=name + 'i')\n plt.legend()\n\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n phi = np.arctan2(xs.imag, xs.real)\n if (ri>1e-8).any():\n plt.plot(Qz, phi, format, label=name + 'i')\n plt.legend()", "def gen_plot(fpr, 
tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n return buf", "def plot_curve(self, true_values, predictions, ax=None, title='ROC', label='ROC', lw=1, add_auc=True, **kwargs):\n fpr, tpr, _ = roc_curve(true_values, predictions)\n roc_auc = auc(fpr, tpr)\n label_auc = label + ': {:.3f} AUC'.format(roc_auc)\n logging.info('ROC result: %s', label_auc)\n ax.plot(fpr, tpr, lw=lw, label=label_auc if add_auc else label, **kwargs)\n ax.set_title(title)\n ax.set_xlabel('FPR')\n ax.set_ylabel('TPR')\n ax.legend(loc='lower right', frameon=False)\n return ax", "def main():\n ### read parameters from input file\n charfile, headerfile, ofname, nfil, nskip, nbin, rmax, ntotal = read_settings()\n ### loop over the entire trajectory and compute the center of mass, correct for pbc\n r, pol_corr = compute_polarity_correlation(charfile, headerfile, nfil, nskip, nbin, rmax, ntotal)\n\n ### write results to file and generage a plot\n # generate folder structure\n os.system('mkdir ' + ofname)\n # write data of the averaged polarity correlation\n ofile = open(ofname + '/pol_correlation.data', 'w')\n ofile.write('polarity correlation function\\n\\n')\n ofile.write('r_min\\tr_max\\tpol_corrlation\\n')\n for i in range(nbin):\n ofile.write(str(r[i]) + '\\t' + str(r[i+1]) + '\\t' + str(pol_corr[i]) + '\\n')\n ofile.close()\n # gen figure of the averaged polarity correlation\n fig = plt.figure()\n ax = plt.subplot(111)\n ax.plot(0.5*(r[:-1]+r[1:]), pol_corr)\n ax.set_xlabel(r'r [$\\sigma]')\n ax.set_ylabel(r'g_p(r)')\n ax.set_title('Polarity Correlation Function')\n plt.savefig(ofname + '/pol_correlation.png')\n plt.close()\n return", "def plot_curve(self, fig, ax, linewidth=1.5, linestyle='-', color='black', u1=0.00, u2=1.00):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n u = np.linspace(u1, u2, 501)\n X = np.real(self.get_value(u))\n line, = ax.plot(u, X[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n u = np.linspace(u1, u2, 501)\n X, Y = np.real(self.get_value(u))\n line, = ax.plot(X, Y)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n u = np.linspace(u1, u2, 501)\n X, Y, Z = np.real(self.get_value(u))\n line, = ax.plot(X, Y, Z)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n return fig, ax", "def plot(self):\n pass", "def plot_ROC(model, x_test, y_test, save_folder): \n predicted = model.predict(x_test).ravel()\n actual = y_test.ravel()\n fpr, tpr, thresholds = roc_curve(actual, predicted, pos_label=None)\n roc_auc = auc(fpr, tpr)\n plt.title('Test ROC AUC')\n plt.plot(fpr, tpr, 'b', label='AUC = %0.3f' % roc_auc)\n plt.legend(loc='lower right')\n plt.plot([0,1],[0,1],'r--')\n plt.xlim([0.0,1.0])\n plt.ylim([0.0,1.0])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(save_folder + '/ROC.png')\n plt.show()\n plt.close()" ]
[ "0.7373159", "0.7010362", "0.6923584", "0.6742388", "0.66598225", "0.65181124", "0.6510751", "0.6500974", "0.64510316", "0.6430082", "0.6429037", "0.6416149", "0.63878846", "0.63805693", "0.6365573", "0.63452685", "0.63356173", "0.6325078", "0.6313239", "0.6286145", "0.6283321", "0.62715095", "0.6270201", "0.6249389", "0.6241064", "0.6240671", "0.6230367", "0.62162125", "0.62142867", "0.62104845" ]
0.7444382
0
Plots the roccurve per background category. Assumes signals are all datasets of the same signal.
def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):
    # Get a default ax if none is given
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(8,8))
        ax = fig.gca()
    # Get signal efficieny once
    eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    # Perform some basic plotting setup
    ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
    ax.set_xlim(0.0, 1.05)
    ax.set_ylim(0.0, 1.05)
    ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
    ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
    ax.legend(fontsize=DEFAULT_FONTSIZE)
    # Then efficiencies per bkg category (ttjets, qcd, ...)
    bkg_categories = list(set([ b.get_category() for b in bkgs ]))
    bkg_categories.sort()
    lines = {}
    for bkg_cat in bkg_categories:
        # Get Datasets that have this category
        bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]
        # Compute efficiency in this category
        eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)
        # Draw roccurve for this category
        line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
        line.set_label(bkg_cat)
        # Save this line in a dict for potential outputting/modifying
        lines[bkg_cat] = line
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def figure_rois(self):\n channels=[[221,\"CH1 (red) average\",self.Ravg,'magenta'],\n [222,\"CH2 (green) average\",self.Gavg,'green'],\n [223,\"G/R average\",self.GoRavg,'gray'],\n [224,\"Gstd/Ravg\",self.Gstd/self.Ravg,'jet'],\n ]\n\n plt.figure(figsize=(16,12))\n for roiNumber in range(len(self.rois)):\n for i,channel in enumerate(channels):\n subplot,label,image2d,color=channel\n label+=\" [ROI %d]\"%(roiNumber)\n plt.subplot(subplot)\n plt.title(label)\n plot_image(image2d,cm=color,colorbar=(roiNumber==0),\n percentile=(1,99))\n plot_roi_bounds(self.rois[roiNumber]['bounds'],\n label=roiNumber+1)\n plt.tight_layout()\n plot_saveOrShow(self.folderSave+\"/roiAll.png\",show=False)", "def draw_roc(signal, background, output_dir=\".\", output_name=\"roc.pdf\"):\n\n x, y = get_roc(signal, background)\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(6, 6), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n 
ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.plot([0, 1], [0, 1], ':', color='black', lw=2, label=\"Random cut\")\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])", "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def plot(self, data, background, scale=(5, 99)):\n # find the minimum and maximum value of plotting\n vmin = np.percentile(data, scale[0])\n vmax = np.percentile(data, scale[1])\n\n cax1 = self.ax1.imshow(data, cmap='gray', vmin=vmin, vmax=vmax,\n origin='lower')\n cax2 = self.ax2.imshow(background, cmap='viridis',\n origin='lower')\n cs = self.ax2.contour(background, colors='r', linewidths=0.5)\n self.ax2.clabel(cs, inline=1, fontsize=7, use_clabeltext=True)\n self.colorbar(cax1, cax=self.ax1c)\n self.colorbar(cax2, cax=self.ax2c)\n for ax in [self.ax1, self.ax2]:\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Y (pixel)')\n ax.xaxis.set_major_locator(tck.MultipleLocator(500))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(100))\n ax.yaxis.set_major_locator(tck.MultipleLocator(500))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(100))", "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def draw_roc(signal, background, output_dir=\".\", output_name=\"roc\", form=\".pdf\"):\n\n x, y = get_roc(signal, background)\n file_path = output_dir + \"/\"+ output_name + \"_X.cvs\"\n numpy.savetxt(file_path, x, delimiter=\",\")\n file_path = output_dir + \"/\"+ output_name + \"_Y.cvs\"\n numpy.savetxt(file_path, y, delimiter=\",\")\n output_name = output_name + form\n\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(7, 7), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n 
fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])", "def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()", "def spiderplot(categories, values, ax=None,\n axfc = None,\n lcolor=\"k\", lsize=\"small\", \n rcolor=\"0.7\", rsize=\"small\", rarray=None,\n title=None, titlecolor=\"k\", titlesize=\"medium\",\n fillcolor = \"C0\", fillalpha=0.1, \n highlight_unique=True,\n highlight_color=\"C0\", \n **kwargs):\n import matplotlib.pyplot as mpl\n \n if highlight_unique:\n flagnonzero = np.asarray(values)>0 \n highlight = np.argwhere(flagnonzero)[0] if np.sum(flagnonzero) == 1 else None\n lcolor = \"0.5\"\n else:\n highlight = None\n \n # But we need to repeat the first value to close the circular graph:\n values = list(values)\n values += values[:1]\n ncategories = len(categories)\n \n \n # == Plot\n if ax is None:\n fig = mpl.figure(figsize=[3,3.5])\n ax = fig.add_axes([0.1,0.12,0.8,0.7], polar=True, \n facecolor=axfc,\n zorder=1)\n else:\n ax = ax\n fig = ax.figure\n\n # What will be the angle of each axis in the plot? (we divide the plot / number of variable)\n angles = [n / float(ncategories) * 2 * np.pi for n in range(ncategories)]\n angles += angles[:1]\n \n # Draw one axe per variable + add labels labels yet\n ax.set_xticks(angles[:-1])\n ax.set_xticklabels(categories, color=lcolor, size=lsize)\n \n if highlight is not None and highlight_unique:\n xtick = ax.get_xticklabels()[highlight[0]]\n xtick.set_color(highlight_color)\n xtick.set_weight(\"bold\")\n xtick.set_size(xtick.get_size()*1.2)\n \n\n \n # Draw ylabels\n ax.set_rlabel_position(0)\n \n # Scaling\n if rarray is not None: \n ax.set_yticks(rarray[:-1])\n ax.set_ylim(0,rarray[-1])\n \n ax.set_yticklabels(np.asarray(ax.get_yticks(), dtype=\"str\"), \n color=rcolor, size=rsize)\n \n # --------------- #\n # Actual Plot #\n # --------------- #\n # Plot data\n prop = dict(linewidth=1.5, linestyle='solid', color=fillcolor)\n for k,v in kwargs.items():\n prop[k] = v\n # python 3 -> prop = {**dict(linewidth=1.5, linestyle='solid'), **kwarg}\n ax.plot(angles, values, **prop)\n \n # Fill area\n ax.fill(angles, values, fillcolor, alpha=fillalpha)\n \n # Additional Info\n # First entry\n if title is not None:\n ax.set_title(title, size=titlesize, color=titlecolor)\n \n return {\"ax\":ax, \"fig\":fig, \"highlight\":highlight}", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def plotCurves(self, dataByModel):\n prFigure = pyplot.figure()\n self.configChart()\n prAx = prFigure.add_subplot(111)\n prAx.set_xlabel('Recall')\n prAx.set_ylabel('Precision')\n prAx.set_title('PR Curve')\n prAx.grid(True)\n\n rocFigure = pyplot.figure()\n self.configChart()\n rocAx = rocFigure.add_subplot(111)\n rocAx.set_xlabel('Fallout / FPR')\n rocAx.set_ylabel('Recall')\n rocAx.set_title('ROC Curve')\n rocAx.grid(True)\n\n corrFigure = 
pyplot.figure()\n self.configChart()\n corrAx = corrFigure.add_subplot(111)\n corrAx.set_xlabel('predict score')\n corrAx.set_ylabel('real score')\n corrAx.set_title('Correlation Curve')\n corrAx.grid(True)\n\n precisionFigure = pyplot.figure()\n self.configChart()\n precisionAx = precisionFigure.add_subplot(111)\n precisionAx.set_xlabel('score')\n precisionAx.set_ylabel('Precision')\n precisionAx.set_title('Threshold score vs precision')\n precisionAx.grid(True)\n\n recallFigure = pyplot.figure()\n self.configChart()\n recallAx = recallFigure.add_subplot(111)\n recallAx.set_xlabel('score')\n recallAx.set_ylabel('Recall')\n recallAx.set_title('Threshold score vs recall')\n recallAx.grid(True)\n\n falloutFigure = pyplot.figure()\n self.configChart()\n falloutAx = falloutFigure.add_subplot(111)\n falloutAx.set_xlabel('score')\n falloutAx.set_ylabel('Fallout (False Positive Rate)')\n falloutAx.set_title('Threshold score vs fallout')\n falloutAx.grid(True)\n\n for (model, data) in list(dataByModel.items()):\n (recalls, precisions) = list(zip(*(data['PR'])))\n prAx.plot(recalls, precisions, marker='o', linestyle='--', label=model)\n\n (fallouts, recalls) = list(zip(*(data['ROC'])))\n rocAx.plot(fallouts, recalls, marker='o', linestyle='--', label=model)\n\n (pCtrs, eCtrs) = list(zip(*(data['CORR'])))\n corrAx.plot(pCtrs, eCtrs, label=model)\n\n (score, recall, precision, fallout) = list(zip(*(data['cutoff'])))\n\n recallAx.plot(score, recall, label=model + '_recall')\n precisionAx.plot(score, precision, label=model + '_precision')\n falloutAx.plot(score, fallout, label=model + '_fallout')\n\n # saving figures\n ensure_dir(self.output_dir)\n prAx.legend(loc='upper right', shadow=True)\n prFigure.savefig('%s/pr_curve.png' % self.output_dir)\n\n rocAx.legend(loc='lower right', shadow=True)\n rocFigure.savefig('%s/roc_curve.png' % self.output_dir)\n\n corrAx.legend(loc='upper left', shadow=True)\n corrFigure.savefig('%s/corr_curve.png' % self.output_dir)\n\n precisionAx.legend(loc='upper left', shadow=True)\n precisionFigure.savefig('%s/precision.png' % self.output_dir)\n\n recallAx.legend(loc='lower left', shadow=True)\n recallFigure.savefig('%s/recall.png' % self.output_dir)\n\n falloutAx.legend(loc='upper right', shadow=True)\n falloutFigure.savefig('%s/fallout.png' % self.output_dir)\n\n pyplot.close()\n pngs = '{result}/pr_curve.png {result}/roc_curve.png {result}/corr_curve.png {result}/precision.png {result}/recall.png {result}/fallout.png'.format(result=self.output_dir)\n print('png: ', pngs)", "def plot_roc(X,y,test_preds,fname=\"res/roc.png\"):\n\t#Retrieve multiple fpr and tpr values for different thresholds\n\tfpr, tpr, thresholds = roc_curve(y,test_preds)\n\tplt.plot(fpr, tpr)\n\tplt.title(auc(fpr, tpr))\n\tplt.savefig(fname, bbox_inches='tight')\n\tplt.close()", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 
1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)", "def plot_ROC_curves(fig, ax, y_all, perf, title=None):\n curves = {'IMPRESS_all': 'royalblue',\n 'IMPRESS_HE_only': 'plum',\n 'IMPRESS_IHC_only': 'pink',\n 'pathologists_eval': 'tomato'}\n \n type_convert = {'IMPRESS_all': 'IMPRESS',\n 'IMPRESS_HE_only': 'IMPRESS (H&E only)',\n 'IMPRESS_IHC_only': 'IMPRESS (IHC only)',\n 'pathologists_eval': 'Pathologists'}\n \n for fgroup in curves.keys():\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 100)\n ax.set_aspect('equal')\n for seed in range(int(y_all[fgroup].shape[1]/3)):\n y_true = y_all[fgroup].loc[:,'y_true'].iloc[:,seed]\n y_pred_proba = y_all[fgroup].loc[:,'y_pred_proba'].iloc[:,seed]\n tpr, fpr, treshold = roc_curve(y_true, 1-y_pred_proba)\n tprs.append(np.interp(mean_fpr, fpr, tpr))\n roc_auc = auc(fpr, tpr)\n 
aucs.append(roc_auc)\n ax.plot(fpr, tpr, color=curves[fgroup], linewidth=2, alpha=0.10, label=None)\n \n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n \n ax.plot(mean_fpr, mean_tpr, color=curves[fgroup],\n label=r'%s (AUC = %0.4f $\\pm$ %0.2f)' % \\\n (type_convert[fgroup], perf[fgroup].loc['AUC','mean'], perf[fgroup].loc['AUC','std']),\n linewidth=3.0, alpha=0.80)\n \n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n \n if fgroup == 'IMPRESS_all':\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=r'$\\pm$ 1 standard deviation')\n else:\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=None)\n \n ax.set_xlabel('False positive rate')\n ax.set_ylabel('True positive rate')\n x = [0.0, 1.0]\n plt.plot(x, x, linestyle='dashed', color='red', linewidth=2.0, label='Random')\n plt.legend(fontsize=10, loc='best')\n \n if title is not None:\n fig.suptitle(t=title, fontsize=12)\n return fig", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plotYields(data,signal=None,backgrounds=[],bins=[]):\n print \n if not bins:\n center = [i+0.5 for i,d in enumerate(data)] # pseudo-data points for making histogram\n bins = [i for i in range( len(data)+1 )] # pseudo-binning\n else:\n center = [ 0.5*(b+bins[i+1]) for i,b in enumerate(bins) if i<len(bins)-1]\n data = np.array(data)\n\n # stack the backgrounds on top of each other in the plot\n nbckgs = len(backgrounds)\n labels = ['background {0}'.format(i) for i in range(nbckgs)]\n weights = list(backgrounds)\n bincenters = [ list(center) for _ in range(nbckgs)]\n\n # stack the signal on top of the backgrounds\n if signal is not None:\n # 'signal' is what we want to unfold, e.g., ttbar\n labels += ['signal']\n weights += [list(signal)]\n bincenters += [list(center)]\n\n # plot backgrounds & signal\n d,bb,pp = plt.hist(bincenters,weights=weights,stacked=True,\n histtype='stepfilled',label=labels,\n edgecolor='k',bins=bins)\n\n # plot the data as error bars\n plt.errorbar(center,data,color='k',\n fmt='o',yerr=np.sqrt(data),\n label='Data')\n\n plt.ylim(ymin=0,ymax=plt.ylim()[1]*1.6) # scale the y-axis to accommodate the legend\n plt.legend()\n plt.xlabel(\"Distribution\")\n plt.ylabel(\"Events\")\n\n return", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def plot_1(ecg, sample_rate=500, title = 'ECG'):\n plt.figure(figsize=(15,2))\n plt.suptitle(title)\n plt.subplots_adjust(\n hspace = 0, \n wspace = 0.04,\n left = 0.04, # the left side of the subplots of the figure\n right = 0.98, # the right side of the subplots of the figure\n bottom 
= 0.2, # the bottom of the subplots of the figure\n top = 0.88\n )\n seconds = len(ecg)/sample_rate\n\n ax = plt.subplot(1, 1, 1)\n step = 1.0/sample_rate\n _ax_plot(ax,np.arange(0,len(ecg)*step,step),ecg, seconds)", "def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )", "def diffuse_flux(self, rois=[0,888]):\n fig, ax = plt.subplots(1,1, figsize=(6,6), dpi=150, sharey=True)\n egev = np.array(self.energy)/1e3\n if rois is None: rois = self.rois\n\n for r in rois:\n gal, iso = self.get_background(r)\n ax.plot(egev, gal, '-D', label='gal %d'%r)\n ax.plot(egev, iso, '--o', label='iso %d'%r)\n plt.setp(ax, xscale='log', xlim=(0.1,300), xlabel='Energy (GeV)',\n yscale='log', ylim=(1e-1,1e6), ylabel='Diffuse counts/ROI')\n ax.legend(prop=dict(size=10)); ax.grid()\n return fig", "def plotRadarPlot(data_grouped, save=False, *args):\n #We get the name of features\n variables = data_grouped.columns\n #We get the ranges of each features\n ranges = findRanges(data_grouped)\n #We plot each cluster on a different radar (better for vizualisation\n for i in range(0, len(data_grouped)):\n #Init the figure\n fig1 = plt.figure(figsize=(6, 6))\n #Init the radar\n radar = ComplexRadar(fig1, variables, ranges)\n #Init values on the radar\n 
radar.plot(data_grouped.loc[i, :], ranges)\n #Fill the radar (plot looks better with that fill)\n radar.fill(data_grouped.loc[i, :], alpha=0.2)\n if save == True:\n try:\n plt.savefig(args + \"radar\" + data_grouped.loc[i, :] + \".png\")\n except NameError:\n print('Missing the path for saving')\n plt.show()", "def plot_corner(self, caxes):\n xx = np.array([self.parchain[p] for p in self.show])\n labels = [pretty.get(p, p) for p in self.show]\n spans = get_spans(None, xx, weights=self.weights)\n\n truths = self.convert(dict_to_struct(self.obs[\"mock_params\"]))\n tvec = np.array([truths[p] for p in self.show])\n caxes = allcorner(xx, labels, caxes, weights=self.weights, span=spans,\n color=self.pkwargs[\"color\"], hist_kwargs=self.hkwargs,\n psamples=tvec, samples_kwargs={\"color\": self.tkwargs[\"mfc\"], \"edgecolor\": \"k\"},\n label_kwargs=self.label_kwargs,\n tick_kwargs=self.tick_kwargs, max_n_ticks=4)\n # Plot truth\n for ax, p in zip(np.diagonal(caxes), self.show):\n ax.axvline(truths[p], marker=\"\", **self.tkwargs)\n\n # plot priors\n if self.prior_samples > 0:\n self.show_priors(np.diag(caxes), spans, smooth=0.1, **self.rkwargs)", "def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()", "def plot_feature_correlations(self):\n\n fig = plt.figure(figsize=(18,18), tight_layout=True)\n fig.suptitle('Feature correlations', fontsize=24)\n\n sns.heatmap(self.train_data.astype(float).corr(method='kendall'), linewidths=0.1, vmin=-1.0,\n vmax=1.0, square=True, linecolor='white', annot=True, \n cmap=\"PiYG\")\n plt.savefig(r'data_analysis\\correlations_kendall_' + self.file_name + '.png', \n facecolor=fig.get_facecolor())", "def plot_all(pred_tuple, filename='roc.png'):\n plt.clf()\n colors = [\"red\",\"blue\",\"green\",\"orange\",\"yellow\"]\n for (label, y, proba), color in zip(pred_tuple, colors):\n true_pos, false_pos, thresh = metrics.roc_curve(y, proba)\n plt.plot(false_pos, true_pos, label=label, linewidth=2,\n color=color)\n plt.plot([0,1],[0,1], linestyle=\"dashed\", color=\"grey\", label=\"random\")\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"Receiver Operating Characteristic\")\n plt.legend(loc=\"lower right\")\n\n plt.show()\n plt.savefig(_plots_path + filename)", "def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def visualize(self, background, num_labeled=10, magnification=1.0, viz=True, cutoff=100):\n \n assert magnification >= 1.0\n lst_x, lst_y, keys = self.create_xy_table(background, cutoff=cutoff)\n 
fig, ax = plt.subplots()\n low, high = 0.0, round(float(1)/magnification, 1)\n ax.set_xlim(low, high)\n ax.set_ylim(low, high)\n ax.set_aspect('equal')\n ax.scatter(lst_x, lst_y)\n\n for idx, key in enumerate(keys):\n if idx > num_labeled: txt = ''\n ax.annotate(key, (lst_x[idx],lst_y[idx]))\n\n if viz:\n plt.show()\n else:\n name = self.name if self.name else 'anon'\n plt.savefig(name)", "def ShowLongitBackground(spectra,spectraUp,spectraDown,spectraAv,all_titles,all_filt,object_name,NBIMGPERROW=2,right_edge=1800):\n NBSPEC=len(spectra)\n MAXIMGROW=(NBSPEC-1) / NBIMGPERROW +1\n\n f, axarr = plt.subplots(MAXIMGROW,NBIMGPERROW,figsize=(25,5*MAXIMGROW))\n f.tight_layout()\n for index in np.arange(0,NBSPEC):\n ix=index%NBIMGPERROW\n iy=index/NBIMGPERROW\n axarr[iy,ix].plot(spectra[index],'r-')\n axarr[iy,ix].plot(spectraUp[index],'b-')\n axarr[iy,ix].plot(spectraDown[index],'g-')\n axarr[iy,ix].plot(spectraAv[index],'m-')\n thetitle=\"{}) : {} \".format(index,all_titles[index])\n axarr[iy,ix].set_title(thetitle)\n axarr[iy,ix].grid(True)\n axarr[iy,ix].set_ylim(0.,spectra[index][:right_edge].max()*1.2)\n axarr[iy,ix].annotate(all_filt[index],xy=(0.05,0.9),xytext=(0.05,0.9),verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20, xycoords='axes fraction')\n title='Longitudinal background Up/Down'.format(object_name)\n plt.suptitle(title,size=16)" ]
[ "0.6300367", "0.61281216", "0.6091596", "0.60315055", "0.59216946", "0.5900686", "0.5838783", "0.5722116", "0.55689114", "0.5540599", "0.5525509", "0.5439819", "0.54131126", "0.540916", "0.5403627", "0.53976256", "0.53795195", "0.53759587", "0.53333044", "0.53327256", "0.53212583", "0.5320267", "0.5310172", "0.53059155", "0.53008276", "0.5278984", "0.5265435", "0.5264627", "0.52614826", "0.5259024" ]
0.6172026
1
Fills a coffea.hist.Hist for a single distribution. Takes a list of Dataset objects, and a function `get_array` that should return a numpy-like array when given an arrays object. Also requires a string `name` so it knows which hist to fill.
def hist_single_distribution( arrays_iterator, get_array, varname='somevar', vartitle=None, distrname='somedistr', distrtitle=None, hist=None, left=-1., right=1., nbins=50 ): if hist is None: import coffea.hist vartitle = varname if vartitle is None else vartitle hist = coffea.hist.Hist( "Count", coffea.hist.Bin(varname, vartitle, nbins, left, right), coffea.hist.Cat('label', varname), ) for arrays, dataset in arrays_iterator: print(dataset.get_weight(), get_array(arrays)) hist.fill(label=distrname, weight=dataset.get_weight(), **{varname: get_array(arrays)}) return hist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n if not kwargs.get(\"append\", False):\n self._errorband.Reset()\n self._errorband.Add(self)\n else:\n super(Histo1D, self).Fill(*args)", "def getHist(self, name, **kwargs):\n extra = kwargs.get(\"extra\", \"\")\n\n hist1 = [\n self._f.Get(\n \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}{0[extra]}\".format(\n {\n \"dir\": self._directory1,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n hist2 = [\n self._f2.Get(\n \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}{0[extra]}\".format(\n {\n \"dir\": self._directory2,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], 9)\n ] # Get jT histograms from file an array\n hist = [\n hist1[i] if (i < self._range[1] - self._range[0]) else hist2[i]\n for i in range(0, 9 - self._range[0])\n ]\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n jetPt = parse_jet_pt_bins(hist)\n\n for h, N, bgN, rndmbgN in zip(\n hist, self._measN, self._measBgN, self._measRndmBgN\n ):\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n h.Scale(1.0 / bgN, \"width\")\n print(\"{} is bg\".format(name))\n elif kwargs.get(\"isRndmBg\", False):\n print(\"Is random background\")\n h.SetLineColor(self.properties.get(\"color\", 1) + 2)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 2)\n h.Scale(1.0 / rndmbgN, \"width\")\n print(\"Scale by {}\".format(rndmbgN))\n else:\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n h.Scale(1.0 / N, \"width\")\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt\n else:\n return hist", "def hist_aggregate(hist_name, hist_dim=1, norm=None, **hist_args):\n\tdef decorator(fn):\n\t\tdef _inner(vals, hist_collection):\n\t\t\tvals = fn(vals)\n\n\t\t\t# we want to be able to handle dicts\n\t\t\t# for the case where multiple instances of the \"same\" hist\n\t\t\t# separated by a selection (the dict key) are returned.\n\t\t\t# if that *isn't* what happened, turn it into a dict with a single key.\n\t\t\tif not isinstance(vals, dict):\n\t\t\t\tvals = {None: vals}\n\n\t\t\tfor subsample, vs in vals.items():\n\t\t\t\tfull_hist_name = \"%s_%s\" % (hist_name, subsample) if subsample else hist_name\n\t\t\t\tif hist_dim == 1:\n\t\t\t\t\thist, bins = numpy.histogram(vs, **hist_args)\n\t\t\t\telif hist_dim == 2:\n\t\t\t\t\tif len(vs) == 0:\n\t\t\t\t\t\treturn\n\t\t\t\t\thist, binsx, binsy = numpy.histogram2d(*vs, **hist_args)\n\t\t\t\t\tbins = 
(binsx, binsy)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Unsupported histogram dimension: \" + str(hist_dim))\n\n\t\t\t\tif full_hist_name in hist_collection:\n\t\t\t\t\th = hist_collection[full_hist_name]\n\t\t\t\t\tif h.dim == 1:\n\t\t\t\t\t\tassert all(h.bins == bins)\n\t\t\t\t\telif h.dim == 2:\n\t\t\t\t\t\tassert all([numpy.array_equal(h.bins[i], bins[i]) for i in range(len(h.bins))])\n\t\t\t\t\thist_collection[full_hist_name].data += hist\n\t\t\t\telse:\n\t\t\t\t\th = Hist(dim=hist_dim, bins=bins, data=hist, norm=norm)\n\t\t\t\t\thist_collection[full_hist_name] = h\n\n\t\treturn _inner\n\n\treturn decorator", "def histogram(\n *args,\n bins=None,\n range=None,\n dim=None,\n weights=None,\n density=False,\n block_size=\"auto\",\n keep_coords=False,\n bin_dim_suffix=\"_bin\",\n):\n\n args = list(args)\n N_args = len(args)\n\n # TODO: allow list of weights as well\n N_weights = 1 if weights is not None else 0\n\n for a in args:\n if not isinstance(a, xr.DataArray):\n raise TypeError(\n \"xhistogram.xarray.histogram accepts only xarray.DataArray \"\n + f\"objects but a {type(a).__name__} was provided\"\n )\n\n for a in args:\n assert a.name is not None, \"all arrays must have a name\"\n\n # we drop coords to simplify alignment\n if not keep_coords:\n args = [da.reset_coords(drop=True) for da in args]\n if N_weights:\n args += [weights.reset_coords(drop=True)]\n # explicitly broadcast so we understand what is going into apply_ufunc\n # (apply_ufunc might be doing this by itself again)\n args = list(xr.align(*args, join=\"exact\"))\n\n # what happens if we skip this?\n # args = list(xr.broadcast(*args))\n a0 = args[0]\n a_coords = a0.coords\n\n # roll our own broadcasting\n # now manually expand the arrays\n all_dims = [d for a in args for d in a.dims]\n all_dims_ordered = list(OrderedDict.fromkeys(all_dims))\n args_expanded = []\n for a in args:\n expand_keys = [d for d in all_dims_ordered if d not in a.dims]\n a_expanded = a.expand_dims({k: 1 for k in expand_keys})\n args_expanded.append(a_expanded)\n\n # only transpose if necessary, to avoid creating unnecessary dask tasks\n args_transposed = []\n for a in args_expanded:\n if a.dims != all_dims_ordered:\n args_transposed.append(a.transpose(*all_dims_ordered))\n else:\n args.transposed.append(a)\n args_data = [a.data for a in args_transposed]\n\n if N_weights:\n weights_data = args_data.pop()\n else:\n weights_data = None\n\n if dim is not None:\n dims_to_keep = [d for d in all_dims_ordered if d not in dim]\n axis = [args_transposed[0].get_axis_num(d) for d in dim]\n else:\n dims_to_keep = []\n axis = None\n\n h_data, bins = _histogram(\n *args_data,\n weights=weights_data,\n bins=bins,\n range=range,\n axis=axis,\n density=density,\n block_size=block_size,\n )\n\n # create output dims\n new_dims = [a.name + bin_dim_suffix for a in args[:N_args]]\n output_dims = dims_to_keep + new_dims\n\n # create new coords\n bin_centers = [0.5 * (bin[:-1] + bin[1:]) for bin in bins]\n new_coords = {\n name: ((name,), bin_center, a.attrs)\n for name, bin_center, a in zip(new_dims, bin_centers, args)\n }\n\n # old coords associated with dims\n old_dim_coords = {name: a0[name] for name in dims_to_keep if name in a_coords}\n\n all_coords = {}\n all_coords.update(old_dim_coords)\n all_coords.update(new_coords)\n # add compatible coords\n if keep_coords:\n for c in a_coords:\n if c not in all_coords and set(a0[c].dims).issubset(output_dims):\n all_coords[c] = a0[c]\n\n output_name = \"_\".join([\"histogram\"] + [a.name for a in args[:N_args]])\n\n da_out 
= xr.DataArray(h_data, dims=output_dims, coords=all_coords, name=output_name)\n\n return da_out\n\n # we need weights to be passed through apply_func's alignment algorithm,\n # so we include it as an arg, so we create a wrapper function to do so\n # this feels like a hack\n # def _histogram_wrapped(*args, **kwargs):\n # alist = list(args)\n # weights = [alist.pop() for n in _range(N_weights)]\n # if N_weights == 0:\n # weights = None\n # elif N_weights == 1:\n # weights = weights[0] # squeeze\n # return _histogram(*alist, weights=weights, **kwargs)", "def _make_hist(self, oned_arr):\n hist_ = np.histogram(\n a=oned_arr,\n bins=self.null_distributions_[\"histogram_bins\"],\n range=(\n np.min(self.null_distributions_[\"histogram_bins\"]),\n np.max(self.null_distributions_[\"histogram_bins\"]),\n ),\n density=False,\n )[0]\n return hist_", "def addHistogram1D(self, name, title, n_bins, minimum, maximum):\n\t\tself.histograms[ name ] = ROOT.TH1F(name, title, n_bins, minimum, maximum)", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})", "def getHist(self, name, **kwargs):\n extra = kwargs.get(\"extra\", \"\")\n print(name)\n print(extra)\n format_string = \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}{0[extra]}\"\n\n if \"dir\" in kwargs:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": kwargs[\"dir\"],\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n else:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": self._directory,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n jetPt = parse_jet_pt_bins(hist)\n\n if \"LeadingRef\" in name:\n normalization = [\n self._f.Get(\n \"{}/LeadingRefJetPtBin/LeadingRefJetPtBinNFin{:02d}JetPt{:02d}\".format(\n self._directory, self._NFIN, i\n )\n ).Integral()\n for i in range(self._range[0], self._range[1])\n ] # Get number of jets by jet pT bins\n print(\"Normalization set to LeadingRef\")\n print(normalization)\n print(\"Before:\")\n print(self._measN)\n else:\n normalization = self._measN\n\n if self.properties.get(\"isWeight\", False):\n normalizer = range(10)\n else:\n if kwargs.get(\"isBg\", False):\n normalizer = self._measBgN\n elif kwargs.get(\"isRndmBg\", False):\n normalizer = self._measRndmBgN\n else:\n normalizer = normalization\n\n for h, N in zip(hist, normalizer):\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n print(kwargs)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n h.Scale(1.0 / N, \"width\")\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n elif kwargs.get(\"isRndmBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 2)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 2)\n else:\n 
h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt\n else:\n return hist", "def np_histogram(data, title, bins=\"auto\"):\n figure = plt.figure()\n canvas = figure.canvas\n plt.hist(data, bins=bins)\n plt.title(title)\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def hist(bins, y, /, axis=0):\n if bins.ndim != 1:\n raise ValueError('Bins must be 1-dimensional.')\n\n with quack._ArrayContext(y, push_right=axis) as context:\n # Get flattened data\n y = context.data\n yhist = np.empty((y.shape[0], bins.size - 1))\n\n # Take histogram\n for k in range(y.shape[0]):\n yhist[k, :] = np.histogram(y[k, :], bins=bins)[0]\n\n # Replace data\n context.replace_data(yhist)\n\n # Return unflattened data\n return context.data", "def data_hist(xvar, yvar, datahist, nbins=95):\n hists = [datahist[j].createHistogram(\n 'hdata{0}{1}'.format(c, i),\n xvar, RooFit.Binning(nbins),\n RooFit.YVar(yvar, RooFit.Binning(nbins))\n ) for j, (i, c) in enumerate(ic)]\n return hists", "def _get_hist_data(self,hists,data):\n try:\n for hist in hists:\n self._get_hist_data(hist,data)\n except TypeError:\n hist_dict = {\"name\" : hists.hist.GetName(),\"cut_labels\" : hists.cut_labels, \"use_for_eff\" : self.use_for_eff}\n data.append(hist_dict)\n return data", "def __init__(self, array, compute_histogram=True):\n\n self.data = array\n self.histogram = np.array([])\n self.dim_x = array.shape[0]\n self.dim_y = array.shape[1]\n self.dim_z = array.shape[2]\n\n if compute_histogram:\n self.compute_histogram()", "def collect_absolute_value(self, name_to_arr):\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901\n\n if tensor not in self.histogram_dict:\n # first time it uses num_bins to compute histogram.\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n # increase the number of bins\n width = old_hist_edges[1] - old_hist_edges[0]\n # NOTE: np.arange may create an extra bin after the one containing temp_amax\n new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[: len(old_hist)] += old_hist\n self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))", "def collect_value(self, name_to_arr):\n for tensor, data_arr in name_to_arr.items():\n data_arr = 
np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n threshold = max(abs(min_value), abs(max_value))\n\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold\n )\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))\n self.histogram_dict[tensor] = (\n hist,\n hist_edges,\n min_value,\n max_value,\n threshold,\n )", "def Hist2DUUID(*args,**kargs):\n func = ROOT.TH2F\n if \"TH2D\" in kargs and kargs[\"TH2D\"]:\n func = ROOT.TH2D\n if \"TEfficiency\" in kargs and kargs[\"TEfficiency\"]:\n func = ROOT.TEfficiency\n name = uuid.uuid1().hex\n hist = None\n if len(args) == 2 and type(args[0]) == list and type(args[1]) == list:\n hist = func(name,\"\",len(args[0])-1,array.array('f',args[0]),len(args[1])-1,array.array('f',args[1]))\n elif len(args) == 6:\n for i in range(6):\n if not isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",args[0],args[1],args[2],args[3],args[4],args[5])\n elif len(args) == 4:\n if type(args[0]) == list:\n for i in range(1,4):\n if not isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",len(args[0])-1,array.array('d',args[0]),args[1],args[2],args[3])\n elif type(args[3]) == list:\n for i in range(3):\n if not isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",args[0],args[1],args[2],len(args[3])-1,array.array('d',args[3]))\n else:\n raise Exception(\"Hist: Innapropriate arguments, requires either nBins, low, high or a list of bin edges:\",args)\n return hist", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames", "def array_converter(roodataobject,obs_name):\n try:\n from numpy import array\n except ImportError:\n from array import array as array\n\n # Create the histogram with respect the observable\n histo = roodataobject.createHistogram(obs_name)\n # Normalize\n histo.Scale(1.0/histo.Integral())\n _provlist = []\n for i in xrange(1,histo.GetNbinsX()+1):\n _provlist.append(histo.GetBinContent(i))\n\n # the output array\n try:\n harray = array([ x for x in _provlist ],dtype='d')\n except TypeError:\n harray = array('d',[ x for x in _provlist ])\n return harray", "def HistUUID(*args,**kargs):\n func = ROOT.TH1F\n if \"TH1D\" in kargs and kargs[\"TH1D\"]:\n func = ROOT.TH1D\n if \"TEfficiency\" in kargs and kargs[\"TEfficiency\"]:\n func = ROOT.TEfficiency\n name = uuid.uuid1().hex\n hist = None\n if len(args) == 1 and type(args[0]) == list:\n hist = func(name,\"\",len(args[0])-1,array.array('f',args[0]))\n elif len(args) == 3:\n for i in range(3):\n if not 
isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",args[0],args[1],args[2])\n else:\n raise Exception(\"Hist: Innapropriate arguments, requires either nBins, low, high or a list of bin edges:\",args)\n return hist", "def histogram(arr, xlbl, xrng=None, nbins=20, alpha=1.):\n if xrng is None:\n xrng = (np.min(arr),np.max(arr))\n p = figure(plot_width=600, plot_height=400)\n # Histogram\n hist, edges = np.histogram(arr, range=xrng, density=True, bins=nbins)\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color='blue', alpha=alpha)\n # Label\n p.xaxis.axis_label = xlbl\n # Show\n show(p)", "def glGetHistogram( baseFunction, target, reset, format, type, values=None):\r\n if values is None:\r\n width = glGetHistogramParameteriv(\r\n target,\r\n GL_HISTOGRAM_WIDTH,\r\n )\r\n values = images.images.SetupPixelRead( format, (width,4), type )\r\n arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[\r\n images.images.TYPE_TO_ARRAYTYPE.get(type,type)\r\n ]\r\n baseFunction(\r\n target, reset, format, type,\r\n ctypes.c_void_p( arrayType.dataPointer(values))\r\n )\r\n return values", "def addHistogram2D(self, name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y):\n\t\tself.histograms[ name ] = ROOT.TH2F(name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y)", "def just_histogram(*args, **kwargs):\n return np.histogram(*args, **kwargs)[0].astype(float)", "def hist_data(list_source, frq=151, ln=False, data_lim=None):\n fluxes = []\n\n if data_lim is not None:\n min_acceptable = data_lim[0]\n else:\n min_acceptable = None\n if data_lim is not None:\n max_acceptable = data_lim[1]\n else:\n max_acceptable = None\n \n for gleam_obj in list_source:\n I = gleam_obj.flux_by_frq[frq]\n if is_constrained(I, min_acceptable, max_acceptable):\n if ln:\n fluxes.append(np.log(I))\n else:\n fluxes.append(I)\n \n return np.array(fluxes)", "def histogram(*args, bins=None, dim=None, weights=None, density=False,\n block_size='auto', bin_dim_suffix='_bin',\n bin_edge_suffix='_bin_edge'):\n\n N_args = len(args)\n\n # TODO: allow list of weights as well\n N_weights = 1 if weights is not None else 0\n\n # some sanity checks\n # TODO: replace this with a more robust function\n assert len(bins)==N_args\n for bin in bins:\n assert isinstance(bin, np.ndarray), 'all bins must be numpy arrays'\n\n for a in args:\n # TODO: make this a more robust check\n assert a.name is not None, 'all arrays must have a name'\n\n # we drop coords to simplify alignment\n args = [da.reset_coords(drop=True) for da in args]\n if N_weights:\n args += [weights.reset_coords(drop=True)]\n # explicitly broadcast so we understand what is going into apply_ufunc\n # (apply_ufunc might be doing this by itself again)\n args = list(xr.align(*args, join='exact'))\n\n\n\n # what happens if we skip this?\n #args = list(xr.broadcast(*args))\n a0 = args[0]\n a_dims = a0.dims\n\n # roll our own broadcasting\n # now manually expand the arrays\n all_dims = [d for a in args for d in a.dims]\n all_dims_ordered = list(OrderedDict.fromkeys(all_dims))\n args_expanded = []\n for a in args:\n expand_keys = [d for d in all_dims_ordered if d not in a.dims]\n a_expanded = a.expand_dims({k: 1 for k in expand_keys})\n args_expanded.append(a_expanded)\n\n # only transpose if necessary, to avoid creating unnecessary dask tasks\n args_transposed = []\n for a in args_expanded:\n if a.dims != all_dims_ordered:\n args_transposed.append(a.transpose(*all_dims_ordered))\n 
else:\n args.transposed.append(a)\n args_data = [a.data for a in args_transposed]\n\n if N_weights:\n weights_data = args_data.pop()\n else:\n weights_data = None\n\n if dim is not None:\n dims_to_keep = [d for d in all_dims_ordered if d not in dim]\n axis = [args_transposed[0].get_axis_num(d) for d in dim]\n else:\n dims_to_keep = []\n axis = None\n\n h_data = _histogram(*args_data, weights=weights_data, bins=bins, axis=axis,\n block_size=block_size)\n\n # create output dims\n new_dims = [a.name + bin_dim_suffix for a in args[:N_args]]\n output_dims = dims_to_keep + new_dims\n\n # create new coords\n bin_centers = [0.5*(bin[:-1] + bin[1:]) for bin in bins]\n new_coords = {name: ((name,), bin_center, a.attrs)\n for name, bin_center, a in zip(new_dims, bin_centers, args)}\n\n old_coords = {name: a0[name]\n for name in dims_to_keep if name in a0.coords}\n all_coords = {}\n all_coords.update(old_coords)\n all_coords.update(new_coords)\n\n # CF conventions tell us how to specify cell boundaries\n # http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#cell-boundaries\n # However, they require introduction of an additional dimension.\n # I don't like that.\n edge_dims = [a.name + bin_edge_suffix for a in args[:N_args]]\n edge_coords = {name: ((name,), bin_edge, a.attrs)\n for name, bin_edge, a in zip(edge_dims, bins, args)}\n\n output_name = '_'.join(['histogram'] + [a.name for a in args[:N_args]])\n\n da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords,\n name=output_name)\n return da_out\n\n # we need weights to be passed through apply_func's alignment algorithm,\n # so we include it as an arg, so we create a wrapper function to do so\n # this feels like a hack\n # def _histogram_wrapped(*args, **kwargs):\n # alist = list(args)\n # weights = [alist.pop() for n in range(N_weights)]\n # if N_weights == 0:\n # weights = None\n # elif N_weights == 1:\n # weights = weights[0] # squeeze\n # return _histogram(*alist, weights=weights, **kwargs)", "def super_hist(self, data_list, alpha=0.5, log_scale=True, bins=45):\r\n\r\n fig, _ = mp.subplots(1, 1, figsize=(15, 10), constrained_layout=True)\r\n\r\n names = []\r\n for data in data_list:\r\n plot_data = data[data.Day_First_N_Infections != \"None\"]\r\n column_data = plot_data[\"Day_First_N_Infections\"].values\r\n sns.distplot(column_data,\r\n kde=False,\r\n bins=bins,\r\n hist_kws={\r\n \"linewidth\": 1,\r\n \"alpha\": alpha,\r\n \"edgecolor\": 'black',\r\n \"log\": log_scale\r\n })\r\n\r\n mp.legend(loc='upper left', fontsize=20)\r\n mp.xlabel(\"Days from outbreak to case number \" + str(data_list[0].N) +\r\n \" in county\",\r\n fontsize=18)\r\n mp.ylabel(\"Frequency\", fontsize=18)\r\n\r\n fig.savefig(\"hist_N\" + str(data_list[0].N) + \"_\" + \"_\".join(names) +\r\n \".png\")", "def makeHist(data, bins, wgt=None, factor=1.0, pdf=False):\n n_arr, bins = np.histogram(data, bins, weights=wgt)\n ctr_bins = centerOfBins(bins)\n \n if pdf == True:\n n_arr = asFloat(n_arr) / (float(sum(n_arr)) * (bins[1:] - bins[:-1]))\n else:\n n_arr = asFloat(n_arr) * factor\n \n return n_arr, ctr_bins", "def get_proto_hist(var, name, nbins=None):\n ## Basic default settings, which will be used\n ## NOTE: In the future it is planned to make it possible to override these\n ## via a JSON file\n logging.debug('Getting prototype histogram for var: {}'.format(var))\n\n # there were root versions where this lead to automatic binning -> default\n hist = r.TH1D('', '', 100, 1, -1)\n\n hist_var = get_key_from_var(var)\n if hist_var:\n 
histset = default_hist_set[hist_var]\n if nbins is None:\n nbins = histset['n_bins']\n logging.debug('Using histogram settings {}'.format(histset))\n hist = r.TH1D(name, '',\n histset['n_bins'], histset['min'], histset['max'])\n set_hist_opts(hist)\n else:\n logging.warning('Could not get histogram settings for var: {}'\n .format(var))\n\n return hist" ]
[ "0.6048329", "0.59464675", "0.5933661", "0.5887746", "0.58725816", "0.57364476", "0.5714648", "0.5633713", "0.56145376", "0.5604098", "0.5588111", "0.5546958", "0.554628", "0.55081207", "0.549454", "0.54831433", "0.5475002", "0.5430655", "0.54043436", "0.53916496", "0.53677046", "0.53662616", "0.5340251", "0.530631", "0.52899975", "0.52799666", "0.52710634", "0.5262917", "0.5177245", "0.5161277" ]
0.73073107
0
Takes a cut function and tries to return a title for it
def get_title(fn): title = fn.name if hasattr(fn, 'name') else fn.__name__ title = title.replace('_cut_function','') suffix = [] # if 'JetsAK15_subleading_' in title: # suffix.append(r'$j^{\mathrm{AK15}}_{\mathrm{subl}}$') title = title.replace('JetsAK15_subleading_', '').replace('subleading_', '') if hasattr(fn, 'left'): suffix.append('({:.0f} < {} < {:.0f})'.format(fn.left, svjflatanalysis.utils.get_title('mt'), fn.right)) # Transform variable name to title stirng title = svjflatanalysis.utils.get_title(title) if hasattr(fn, 'operator'): title += ' ' + fn.operator + ' cut' # Add the suffix title += ' ' + ' '.join(suffix) return title
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_title():", "def make_title(words):", "def getTitle(test:str) -> str:\n return test[5:].strip()", "def PROPER(text):\n return text.title()", "def test_title(names):", "def title(value):\r\n title_word = lambda w: w if RE_UPPERCASE.search(w) else old_title(w)\r\n return re.sub('(\\S+)', lambda m: title_word(m.group(0)), value)", "def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]", "def get_title(line):\n title = line.split(' (')[0]\n return title", "def cut(value,arg):\n return cut.replace(arg,\"\")", "def get_title(f):\n return os.path.basename(f)", "def _title(profile):\n if profile['operation'] == 'differential':\n p1, p2 = profile['profiles']\n return 'differential ({}, {})'.format(_title(p1), _title(p2))\n elif profile['operation'] == 'local feature':\n p = profile['profile']\n return 'local feature {} ({})'.format(profile['function'], _title(p))\n else:\n return ' '.join([str(x) for x in profile.values()])", "def print_title( title, decorators ):\n decorators = \"*\" * decorators\n print \"\\n%s %s: %s\\n\" % ( decorators, title, decorators )", "def test_getTitle(self):\n def checkNameAndTitle(name, titlesolution):\n title = self._nameClassifierBuilder._getTitle(name)\n self.assertEquals(titlesolution, title)\n\n checkNameAndTitle(\"Mrs. ldajfhgp\", \"Mrs\")\n checkNameAndTitle(\"dlsfajkMrdlkjaf\", \"Mr\")\n checkNameAndTitle(\"dagddgwdasJonkheer\", \"Jonkheer\")", "def get_title(self):\n return self.run_command('get_title')[0]", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def title(self):\n return self.run_command('title')[0]", "def testCapTitleAgain(self):\n val = capTitles(\"victor\") \n self.assertEqual(val, \"Victor\")", "def title(self) -> String:\n pass", "def truncate_title(title):\n return title if len(title) <= 70 else title[:70]+\"...\"", "def parse_title(title, various):\n if various and \" - \" in title:\n title = title.split(\" - \", 1)[1]\n return RE_FEAT.sub(\"\", title).rstrip()", "def get_title(self) -> str:\n pass", "def titleForSelection(self, selection):\n if selection is None or selection.filename is None:\n return None\n else:\n directory, filename = os.path.split(selection.filename)\n try:\n slicing = self.__formatSlices(selection.slice)\n except Exception:\n _logger.debug(\"Error while formatting slices\", exc_info=True)\n slicing = '[sliced]'\n\n permuted = '(permuted)' if selection.permutation is not None else ''\n\n try:\n title = self.TITLE_PATTERN.format(\n directory=directory,\n filename=filename,\n datapath=selection.datapath,\n slicing=slicing,\n permuted=permuted)\n except Exception:\n _logger.debug(\"Error while formatting title\", exc_info=True)\n title = selection.datapath + slicing\n\n return title", "def correct_cap(title):\n try:\n fl = fln[title]\n return title\n except:\n #capitalize first letter only\n try:\n fl = fln[title[0].upper() + title[1:]]\n return title[0].upper() + title[1:]\n except:\n #try title case\n try:\n fl = fln[title.title()]\n return title.title()\n except KeyError:\n return \"\"", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def x_group_label(\n x_gr: int, 
cut: int = 20, name_dict: Dict[AnyStr, AnyStr] = names_dict\n) -> AnyStr:\n name = name_dict[str(x_gr)]\n if len(name) > cut:\n return f\"{name[:cut-3]}...\"\n else:\n return name", "def pathtitle(path):\n return thing_from_path(path).title", "def title_n(self):\n self.run_command('title_n')", "def favorite_book(title):\n print(\"You should really read \" + title.title() + \", it's my favorite!\")", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended." ]
[ "0.70298654", "0.6522897", "0.6284885", "0.61326414", "0.60484356", "0.6029634", "0.6029412", "0.6012764", "0.59930116", "0.598101", "0.5968265", "0.59671205", "0.5940044", "0.59055644", "0.58885586", "0.5842299", "0.5840335", "0.58212703", "0.58197415", "0.5815073", "0.5795473", "0.5779788", "0.57524157", "0.5747572", "0.5714877", "0.5682866", "0.5673082", "0.56689775", "0.56633717", "0.56559885" ]
0.78655964
0
The Windows version of base.processInterrupt. Note! This doesn't work terribly well with a lot of processes.
def processInterrupt(uPid): try: # pylint: disable=no-member win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid); #GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent #rc = GenerateConsoleCtrlEvent(1, uPid); #reporter.log('GenerateConsoleCtrlEvent -> %s' % (rc,)); fRc = True; except: reporter.logXcpt('uPid=%s' % (uPid,)); fRc = False; return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_process_interrupted(exc: \"KeyboardInterrupt\"):\n _print(f\"\\nInterrupted. {exc}\")", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n self._process.terminate()", "def interrupt_kernel(self, kernel_id):", "def test_control_c_is_possible(self):\n if platform.type != \"posix\":\n raise SkipTest(\"I don't have the energy to fight Windows semantics.\")\n program = \"\"\"\\\nimport os, threading, signal, time, sys\nimport crochet\ncrochet.setup()\nfrom twisted.internet.defer import Deferred\n\nif sys.platform.startswith('win'):\n signal.signal(signal.SIGBREAK, signal.default_int_handler)\n sig_int=signal.CTRL_BREAK_EVENT\n sig_kill=signal.SIGTERM\nelse:\n sig_int=signal.SIGINT\n sig_kill=signal.SIGKILL\n\n\ndef interrupt():\n time.sleep(0.1) # Make sure we've hit wait()\n os.kill(os.getpid(), sig_int)\n time.sleep(1)\n # Still running, test shall fail...\n os.kill(os.getpid(), sig_kill)\n\nt = threading.Thread(target=interrupt, daemon=True)\nt.start()\n\nd = Deferred()\ne = crochet.EventualResult(d, None)\n\ntry:\n e.wait(10000)\nexcept KeyboardInterrupt:\n sys.exit(23)\n\"\"\"\n kw = {'cwd': crochet_directory}\n # on Windows the only way to interrupt a subprocess reliably is to\n # create a new process group:\n # http://docs.python.org/2/library/subprocess.html#subprocess.CREATE_NEW_PROCESS_GROUP\n if platform.type.startswith('win'):\n kw['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP\n process = subprocess.Popen([sys.executable, \"-c\", program], **kw)\n self.assertEqual(process.wait(), 23)", "def interrupt_kernel(self):", "def interrupt(self):\n raise NotImplementedError", "def interrupt_handler(signum, frame): #pylint: disable=W0613\n cleanup()\n sys.exit(-2) # Terminate process here as catching the signal\n # removes the close process behaviour of Ctrl-C", "def interrupt(self):\n # Access 'interrupted' with mutual exclusion\n with self.ilock:\n self.interrupted = True", "def siginterrupt(sig, flag): # real signature unknown; restored from __doc__\n pass", "def interrupt_hanged_processes(profile=\"bluefog\"):\n engine_pids = _get_ipengine_pid_from_file(profile)\n if engine_pids is None:\n raise FileNotFoundError(\"Cannot find pids to interrupt the engines. Note this\"\n \"function is supported under localhost mode only\")\n timeout = 0.2\n\n def send_request_to_rc(i):\n rc = ipp.Client(profile=profile)\n rc[i].apply_sync(lambda: 0)\n\n # Send an empty function to the workers. If it cannot be finished within the\n # {timeout} second, we assume the worker is hanged then send the interrupt\n # signal to it. 
If finished, do nothing.\n p_list = []\n for i in range(len(engine_pids)):\n p = multiprocessing.Process(target=send_request_to_rc, args=(i,))\n p.start()\n p_list.append(p)\n for i, p in enumerate(p_list):\n p.join(timeout)\n if p.exitcode is None:\n try:\n os.kill(engine_pids[str(i)], signal.SIGINT)\n print(f\"send signal to {engine_pids[i]}\")\n except:\n pass", "def checkInterrupt():\n if wasInterrupted():\n raise KeyboardInterrupt()", "def processKill(uPid):\n return processTerminate(uPid);", "def interrupt(self):\n self.interrupt_tick_tocking = True", "def interrupt(self):\r\n self.interrupting = True", "def interrupt(func):\n def do_stuff(*args, **kwargs):\n App.get_running_app().controller.interrupt(restart=True)\n return func(*args, **kwargs)\n return do_stuff", "def test_control_c_is_possible(self):\n if platform.type != \"posix\":\n raise SkipTest(\"I don't have the energy to fight Windows semantics.\")\n program = \"\"\"\\\nimport os, threading, signal, time, sys\nimport crochet\ncrochet.setup()\nfrom twisted.internet.defer import Deferred\n\nif sys.platform.startswith('win'):\n signal.signal(signal.SIGBREAK, signal.default_int_handler)\n sig_int=signal.CTRL_BREAK_EVENT\n sig_kill=signal.SIGTERM\nelse:\n sig_int=signal.SIGINT\n sig_kill=signal.SIGKILL\n\n\ndef interrupt():\n time.sleep(0.1) # Make sure we've hit wait()\n os.kill(os.getpid(), sig_int)\n time.sleep(1)\n # Still running, test shall fail...\n os.kill(os.getpid(), sig_kill)\n\nt = threading.Thread(target=interrupt, daemon=True)\nt.start()\n\n@crochet.%s\ndef wait():\n return Deferred()\n\ntry:\n wait()\nexcept KeyboardInterrupt:\n sys.exit(23)\n\"\"\" % (self.DECORATOR_CALL, )\n kw = {'cwd': crochet_directory}\n if platform.type.startswith('win'):\n kw['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP\n process = subprocess.Popen([sys.executable, \"-c\", program], **kw)\n self.assertEqual(process.wait(), 23)", "def suppress_keyboard_interrupt_message():\n old_excepthook = sys.excepthook\n\n def new_hook(type, value, traceback):\n if type != KeyboardInterrupt:\n old_excepthook(type, value, traceback)\n else:\n pass\n\n sys.excepthook = new_hook", "def setinterrupt(self, chr: int, /) -> None:", "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)", "def interrupt_script(self, kind=\"default\"):\n pass", "async def keyboard_interrupt(self) -> None:\n self.logger.debug(\"Keyboard interrupt start\")\n print(\"Create task\")\n self.task = asyncio.create_task(self.target_coroutine)\n process_id = await self.get_process_id\n try:\n # Reason: only for Windows. 
pylint: disable=no-member\n os.kill(process_id, signal.CTRL_C_EVENT) # type: ignore\n print(\"Await task\")\n await self.task\n except KeyboardInterrupt:\n print(\"Await task in except\")\n # await self.task\n print(\"Assert\")\n assert not self.task.done()\n print(\"Task not done\")\n assert not self.task.cancelled()\n print(\"Task cancelled\")\n raise", "def stop_subprocesses():\n global message_interface\n global c_library_interface\n if message_interface:\n message_interface.stop()\n if c_library_interface:\n c_library_interface.stop()", "def runKeyboardInterruptable(target, *args, **kwargs ):\n kit = KeyboardInterruptable.KeyboardInterruptable(targetcmd=target, \n cancelException=carma.util.CancelException, args=args, \n kwargs=kwargs )\n try:\n kit.start()\n except Exception, ex:\n print 'Caught exception on dispatch!!'\n print ex\n done = True\n keyboardInterrupted = False\n done = False\n try:\n while not done:\n try:\n done = kit.doneWaiting()\n except KeyboardInterrupt:\n print \"\\nCancelling %s...\"%target.__name__\n cancel()\n keyboardInterrupted = True\n except Exception, ex:\n print ex\n done = True\n if not kit.cleanFinish:\n print kit.getExceptionInfo()\n return kit.ret\n except Exception, ex:\n print ex\n done = True\n finally:\n kit.join()\n if kit.successfulCancel: \n print \"%s successfully cancelled\" %target.__name__\n raise SuccessfulCancel\n if keyboardInterrupted and done:\n # The calling function didn't terminate from cancel\n # so reraise the interrupt.\n raise KeyboardInterrupt", "def ctrl_c(signum, frame):\n global shutdown_event\n raise SystemExit('\\nCancelling...')", "def terminate(process):\n\n def terminate_win(process):\n import win32process\n return win32process.TerminateProcess(process._handle, -1)\n\n def terminate_nix(process):\n import os\n import signal\n return os.kill(process.pid, signal.SIGTERM)\n\n terminate_default = terminate_nix\n\n handlers = {\n \"win32\": terminate_win, \n \"linux2\": terminate_nix\n }\n\n return handlers.get(sys.platform, terminate_default)(process)", "def keyboard_interrupt_handler(sig: int, _: object) -> None:\n logger.warning(f'KeyboardInterrupt (id: {sig}) has been caught...')\n logger.info('Terminating the session gracefully...')\n ray.shutdown()\n minio_leftovers = glob('*.part.minio')\n for leftover in minio_leftovers:\n Path(leftover).unlink()\n sys.exit(1)", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(1) # no match", "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def _interrupt(self, signum: int, frame: Optional[Any]) -> None:\n if self._in_task(frame):\n raise KeyboardInterrupt\n else:\n self._interrupted = True\n self._ready_tasks.interrupt()" ]
[ "0.63215697", "0.5775936", "0.57586217", "0.5744429", "0.5740155", "0.5561683", "0.5558111", "0.5497653", "0.5479278", "0.54543006", "0.54173666", "0.541069", "0.5367018", "0.53545386", "0.5337776", "0.53182185", "0.5305628", "0.52493083", "0.522078", "0.5160787", "0.51373094", "0.5127463", "0.51083744", "0.5097883", "0.5093123", "0.5051432", "0.503602", "0.50336325", "0.50290835", "0.5011577" ]
0.67192686
0
Posts a WM_CLOSE message to the specified thread.
def postThreadMesssageClose(uTid):
    fRc = False;
    try:
        win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member
        fRc = True;
    except:
        reporter.logXcpt('uTid=%s' % (uTid,));
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postThreadMesssageQuit(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def close(self):\n\n self.queue.put(\"EXITTHREAD\")\n logging.info(\"in close in thread\")\n try:\n # send closing message immediately\n if self.ircSocket:\n self.ircSocket.send(\n (\n f\"PRIVMSG {self.channel} :closing opponent\"\n \" bot\\r\\n\").encode('utf8')\n )\n while self.channelThread.is_alive():\n pass\n self.running = False\n if self.messageBufferTimer:\n self.messageBufferTimer.cancel()\n except Exception as e:\n logging.error(\"In close\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def closeEvent(self, _):\n for thread in self.threads:\n thread.stop()\n thread.wait()", "def closeEvent(self, event):\n # Stop the run loop in the mandelbrot thread\n self.pipe_to_mandelbrot_thread.send(\"STOP\")\n # Shutdown queues to allow underlying processes to join\n self.display_queue.close()\n self.mandelbrot_queue.close()\n # Join is blocking - waits for thread to exit nicely\n self.mandelbrot_thread.join()\n # Once the compute thread is done, accept the original close event\n event.accept()", "def closeEvent(self, event):\n self._thread.terminate()\n self._thread.wait()\n event.accept()", "def on_closing(event=None):\r\n my_msg.set(\"{quit}\")\r\n send()", "def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def close(self):\r\n self._sendLock.acquire()\r\n try:\r\n self._queue.put(\"CLOSE\")\r\n self._eventQueue.put((time.time(), \"CLOSE\"))\r\n self._closed = 1\r\n self._s.close()\r\n self._thread.join()\r\n self._eventThread.join()\r\n finally:\r\n self._sendLock.release()", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def close(self):\n self.loop.call_soon_threadsafe(self.stop_flag.set_result, True)\n self.server_thread.join()", "def bcp_goodbye(self, **kwargs):\n if self.config['mediacontroller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()", "def ev_windowclose(self, event: tcod.event.WindowEvent) -> T | None:", "def TTAPI_ShutdownCompleted(self, sender, e):\r\n # Shutdown the Dispatcher\r\n if self.m_disp != None:\r\n self.m_disp.BeginInvokeShutdown()\r\n self.m_disp = None", "def TTAPI_ShutdownCompleted(self, sender, e):\r\n # Shutdown the Dispatcher\r\n if self._m_disp != None:\r\n self._m_disp.BeginInvokeShutdown()\r\n self._m_disp = None", "def close(self):\n if not self.closed:\n log.debug(\"Closing worker thread\")\n\n self.closed = True\n if self._wait:\n self._wait.set()", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def on_close(self, event):\r\n if 
self.thread is not None:\r\n self.thread.abort = True\r\n if self.tester is not None:\r\n try:\r\n self.tester.Close()\r\n except:\r\n pass\r\n self.close_debug_console()\r\n event.Skip()", "def main_thread_exit(self):\n ...", "def shutdown(self, signum, frame):\n self.log('WARNING', -1, 'Shutting down normally ...')\n main_thread = threading.current_thread()\n\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()\n sys.exit(0)", "def shutdown(self):\n self.socket_thread.stop()", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def closeEvent(self, evt):\n self.__shutdown()", "def shutdown(self, signum, frame):\n self.serverSocket.close()\n sys.exit(0)", "def kill(self, threadid):\n self.rpc.call(MsfRpcMethod.CoreThreadKill, [threadid])", "def CloseForum(self, event):\n pass", "def on_close(self):\n print('[INFO] closing...')\n self.stopEvent.set()\n del self.tello\n self.root.quit()" ]
[ "0.65258104", "0.647808", "0.6194087", "0.6059737", "0.6028249", "0.5981587", "0.59607416", "0.5957837", "0.5957837", "0.5957837", "0.5957837", "0.5846168", "0.58057123", "0.57886297", "0.574851", "0.5722326", "0.5718076", "0.57172066", "0.5709524", "0.5701404", "0.5693792", "0.568644", "0.56556547", "0.5641714", "0.56399125", "0.56164086", "0.5593753", "0.55523497", "0.553725", "0.5534733" ]
0.7335398
0
Posts a WM_QUIT message to the specified thread.
def postThreadMesssageQuit(uTid):
    fRc = False;
    try:
        win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member
        fRc = True;
    except:
        reporter.logXcpt('uTid=%s' % (uTid,));
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postThreadMesssageClose(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def quit(self, message):\n self.write(['QUIT'], message)\n self.hasquit = True\n # Wait for acknowledgement from the server. By RFC 2812 it should be\n # an ERROR msg, but many servers just close the connection. Either way\n # is fine by us.\n # Closing the connection now would mean that stuff in the buffers that\n # has not yet been processed would never be processed. It would also\n # release the main thread, which is problematic because whomever called\n # quit might still want to do something before main thread quits.", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def ev_QUIT(self, event):\n raise SystemExit()", "def quit(self):\n self.socket.send(\"QUIT\")", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def main_thread_exit(self):\n ...", "def ev_quit(self, event: tcod.event.Quit) -> T | None:", "def do_quit(self, arg):\n self.do_exit(arg)", "def onQuit(self, eventDict = None):\n self.mainApp.quit()", "def cleanThread(self):\n logging.info(\"Clean Thread\")\n self.thread.quit()\n self.thread.wait()", "def _quit(self):\n self.parent.quit() # stops mainloop\n self.parent.destroy() # this is necessary on Windows to prevent\n # Fatal Python Error: PyEval_RestoreThread: NULL tstate\n reactor.stop()", "def do_quit(self, arg):\n exit()", "def _quit():\r\n\twin.quit()\r\n\twin.destroy()\r\n\tquit()", "def ftp_QUIT(self, line):\n # From RFC-959:\n # This command terminates a USER and if file transfer is not\n # in progress, the server closes the control connection.\n # If file transfer is in progress, the connection will remain\n # open for result response and the server will then close it.\n if self.authenticated:\n msg_quit = self.authorizer.get_msg_quit(self.username)\n else:\n msg_quit = \"Goodbye.\"\n if len(msg_quit) <= 75:\n self.respond(\"221 %s\" %msg_quit)\n else:\n self.push(\"221-%s\\r\\n\" %msg_quit)\n self.respond(\"221 \")\n\n if not self.data_channel:\n self.close_when_done()\n else:\n # tell the cmd channel to stop responding to commands.\n self.quit_pending = True\n\n\n # --- data transferring", "def shutdown(self, signum, frame):\n self.log('WARNING', -1, 'Shutting down normally ...')\n main_thread = threading.current_thread()\n\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()\n sys.exit(0)", "def SignalHandler_Quit(signum, frame):\n log('Received signal to quit: %s' % signum)\n \n global RUNNING\n RUNNING = False", "def quit_cmd(self):\n print_debug(\"Executing QUIT\")\n command = \"QUIT\\r\\n\"\n msg_rec = self.send_and_log(self.s, command)\n self.close_socket(self.s) # Close socket since we're done.\n return msg_rec", "def quit(self):\r\n \r\n self.qapp.quit()", "def shutdown():\n\tglobal StoreWorkerThread, 
StoreWorkerThreadLock\n\n\tStoreWorkerThreadLock.acquire()\n\t\n\tif not running():\n\t\t# for convenience, this is not an error\n\t\tStoreWorkerThread = None\n\t\tStoreWorkerThreadLock.release()\n\t\treturn\n\t\t\n\t# send 'quit' command\n\tStoreCmdQueue.put(('quit',))\n\t\n\t# wait for thread to exit\n\tStoreWorkerThread.join()\n\tStoreWorkerThread = None\n\t\n\tStoreWorkerThreadLock.release()", "def do_quit(self, args):\n quit()", "def OnQuit(self, e):\n\t\tself.EndRun()", "async def chat_quit(self, event):\n await self.send_json(\n return_value(\n ACTION_QUIT,\n event['label'],\n event['username'],\n MSG_ALERT,\n NO_MESSAGE\n )\n )", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def quitme(self, evt=None):\n if evt:\n self.dbgprint(\"bye!\")\n sys.exit()", "def request_quit(self):\n self._socketpair[1].send(b'\\x00')", "def request_quit(self):\n self._socketpair[1].send(b'\\x00')", "def command_quit(self, arg):\n self.write('221 Bye', self.finish)", "def delete_thread(self, thread_uid: str):\n pass", "def clickQuit(self, event):\n self.quitFlag = True" ]
[ "0.6247474", "0.6094396", "0.58557373", "0.57782835", "0.57772505", "0.5738489", "0.5506181", "0.5447505", "0.5402179", "0.5354124", "0.5341512", "0.5266031", "0.5251934", "0.52516365", "0.52507806", "0.5238659", "0.5238613", "0.5237029", "0.5236302", "0.5228808", "0.522184", "0.52120554", "0.5207501", "0.5206864", "0.51987433", "0.5194935", "0.5194935", "0.5187494", "0.5186202", "0.51601225" ]
0.7239463
0
The Windows version of base.processKill
def processKill(uPid):
    return processTerminate(uPid);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def _KillProcess(self, pid):\n if sys.platform.startswith('win'):\n process_terminate = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n process_terminate, False, pid)\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n\n else:\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError as exception:\n logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n pid, exception))", "def terminate(process):\n\n def terminate_win(process):\n import win32process\n return win32process.TerminateProcess(process._handle, -1)\n\n def terminate_nix(process):\n import os\n import signal\n return os.kill(process.pid, signal.SIGTERM)\n\n terminate_default = terminate_nix\n\n handlers = {\n \"win32\": terminate_win, \n \"linux2\": terminate_nix\n }\n\n return handlers.get(sys.platform, terminate_default)(process)", "def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def test_stopProcessForcedKill(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.reactor.advance(self.pm.threshold)\r\n proc = self.pm.protocols[\"foo\"].transport\r\n # Arrange for the fake process to live longer than the killTime\r\n proc._terminationDelay = self.pm.killTime + 1\r\n self.pm.stopProcess(\"foo\")\r\n # If process doesn't die before the killTime, procmon should\r\n # terminate it\r\n self.reactor.advance(self.pm.killTime - 1)\r\n self.assertEqual(0.0, self.pm.timeStarted[\"foo\"])\r\n\r\n self.reactor.advance(1)\r\n # We expect it to be immediately restarted\r\n self.assertEqual(self.reactor.seconds(), self.pm.timeStarted[\"foo\"])", "def remote_kill():", "def kill(self, pid, returncode):\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 1, pid)\r\n ret = kernel32.TerminateProcess(handle, returncode)\r\n kernel32.CloseHandle(handle)\r\n return (0 != ret)", "def _kill_kernel(self):", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill(self):\n\n self.proc.kill()", "def _TerminateProcessByPid(self, pid):\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)", "def _on_parent_process_kill(self):", "def __del__(self):\n if self.child_pid:\n 
self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def kill(self):\n processes = ['MicrosoftEdge.exe', 'MicrosoftEdgeCP.exe', 'plugin-container.exe',\n 'browser_broker.exe', 'smartscreen.exe']\n for exe in processes:\n subprocess.call(['taskkill', '/F', '/T', '/IM', exe])", "def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n self._process.terminate()", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def kill(self):\n if self.process is not None:\n LOGGER.info('Killing command...')\n self.process.kill()\n self.process = None", "def kill_process(proc):\r\n p1_group = psutil.Process(proc.pid)\r\n\r\n child_pids = p1_group.get_children(recursive=True)\r\n\r\n for child_pid in child_pids:\r\n os.kill(child_pid.pid, signal.SIGKILL)", "def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')", "def _kill_self():\n os.kill(os.getpid(), signal.SIGKILL)", "def killProcess(self):\n if self._processEnded:\n return defer.succeed(None)\n self.onProcessEnd = defer.Deferred()\n self.transport.signalProcess('KILL')\n return self.onProcessEnd", "def terminate_proc(proc_name=None, proc_id=None):\n assert proc_name or proc_id, \"Neither 'proc_name' nor 'proc_id' are passed.\"\n if sys.platform == \"win32\":\n if proc_name:\n query = ['/fi', 'IMAGENAME eq %s' % proc_name]\n elif proc_id:\n query = ['/fi', 'PID eq %s' % proc_id]\n output = killableprocess.check_output(\n ['tasklist',\n '/nh', # don't display column headers\n '/fo', 'CSV'] # --> \"MyApp.exe\",\"4380\",\"Console\",\"1\",\"395.604 K\"\n + query)\n output = output.decode(sys.getfilesystemencoding())\n proc_ids = []\n for line in output.decode(sys.getfilesystemencoding()).split(\"\\n\"):\n line = line.replace(\"\\r\", \"\")\n if '\"' in line:\n proc_ids.append(eval('[%s]' % line)[1])\n for id in proc_ids:\n killableprocess.call(['taskkill', '/f', '/t', '/pid', id])\n else:\n pass # necessary ?", "def kill(self):\n self._process.kill()", "def kill(self):\n self._stop_proc(signal.SIGKILL)" ]
[ "0.726238", "0.6810974", "0.6760341", "0.6577071", "0.63967526", "0.63236195", "0.63068575", "0.6289544", "0.6275234", "0.6238362", "0.62296176", "0.6153977", "0.6149264", "0.6146983", "0.6144532", "0.61424387", "0.6141811", "0.60982275", "0.60788274", "0.6070093", "0.6050515", "0.60378826", "0.6020217", "0.5984187", "0.5954449", "0.5941723", "0.5895441", "0.5889775", "0.5874231", "0.5865436" ]
0.74643934
0
The Windows version of base.processCheckPidAndName
def processCheckPidAndName(uPid, sName):
    fRc = processExists(uPid);
    if fRc is True:
        try:
            from win32com.client import GetObject; # pylint: disable=F0401
            oWmi = GetObject('winmgmts:');
            aoProcesses = oWmi.InstancesOf('Win32_Process');
            for oProcess in aoProcesses:
                if long(oProcess.Properties_("ProcessId").Value) == uPid:
                    sCurName = oProcess.Properties_("Name").Value;
                    reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));
                    sName = sName.lower();
                    sCurName = sCurName.lower();
                    if os.path.basename(sName) == sName:
                        sCurName = os.path.basename(sCurName);
                    if sCurName == sName \
                      or sCurName + '.exe' == sName \
                      or sCurName == sName + '.exe':
                        fRc = True;
                    break;
        except:
            reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exe_match(expected_name):\n # expected_name = expected_name.encode('ascii')\n def f(win):\n n = conv(win.process_name)\n return n == expected_name\n return f", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == process_name", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def ps_find(name):\n for proc in psutil.process_iter():\n if proc.name() == name:\n return True\n return False", "def check_pid(pid):\n result = None\n try:\n s = os.stat('/proc/' + pid)\n if s.st_uid == our_uid:\n cwd = os.path.realpath('/proc/' + pid + '/cwd')\n if cwd == kill_dir and int(pid) != our_pid:\n f = open('/proc/' + pid + '/cmdline')\n cmdline = f.read().split('\\x00')[:-1]\n f.close()\n result = cmdline\n except OSError:\n # We can't read all our processes; that's ok\n pass\n return result", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def name(self):\n # This is how PIDs 0 and 4 are always represented in taskmgr\n # and process-hacker.\n if self.pid == 0:\n return \"System Idle Process\"\n if self.pid == 4:\n return \"System\"\n return os.path.basename(self.exe())", "def get_pid_name(pid):\n try:\n with open(os.path.join('/proc/', pid, 'cmdline'), 'r') as pidfile:\n try:\n cmd = pidfile.readline().split()[0]\n return os.path.basename(cmd).rstrip('\\x00')\n except IndexError:\n # no cmd returned\n return \"<NO NAME>\"\n except IOError:\n # upstream wait any string, no matter if we couldn't read proc\n return \"no_such_process\"", "def get_name(pid, default=None):\n try:\n return only(\n process.Properties_(\"Name\").Value\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process')\n if process.Properties_(\"ProcessID\").Value == pid\n )\n except TooFewItemsError:\n return default", "def check_process_exist(process_name): \n returncode = '' \n try:\n p=os.popen('tasklist /FI \"IMAGENAME eq %s\"' % process_name) \n returncode = p.read().count(process_name) \n if returncode:\n initlog('%s exists' % process_name)\n except Exception, e:\n initlog(str(e)) \n return returncode", "def on_windows ():\n if bjam.variable(\"NT\"):\n return True\n\n elif bjam.variable(\"UNIX\"):\n\n uname = bjam.variable(\"JAMUNAME\")\n if uname and uname[0].startswith(\"CYGWIN\"):\n return True\n\n return False", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n 
return False", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def getPidByName(process_name):\n \n pid = None\n count = 0\n try:\n hProcessSnap = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)\n pe32 = PROCESSENTRY32()\n pe32.dwSize = sizeof(PROCESSENTRY32)\n ret = kernel32.Process32First(hProcessSnap , byref(pe32))\n while ret:\n if pe32.szExeFile == LPSTR(process_name).value:\n pid = pe32.th32ProcessID\n count += 1\n ret = kernel32.Process32Next(hProcessSnap, byref(pe32))\n kernel32.CloseHandle (hProcessSnap)\n \n except Exception, e:\n debug_print(str(e))\n \n if not pid:\n debug_print(\"Could not find %s PID\" % process_name)\n \n return pid", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def cli_get_process_title():\n raise NotImplementedError()", "def check_processes(self, name: Optional[str] = None) -> str:\n\n for process in self.processes:\n if not process.is_running():\n self.processes.remove(process)\n continue\n\n cmdline = \" \".join(process.cmdline())\n port = re.findall(r\"--port=(\\d+)\", cmdline)\n port = port[0] if port else \"\"\n\n if re.findall(r\"-m\\s+.*streamlit_run|streamlit\", cmdline):\n return f\"http://localhost:{port}/{name}\"\n\n return \"\"", "def is_windows():\n return os.name == \"nt\"", "def get_pid_from_name(process_name:str) -> int:\r\n\tfor process in psutil.process_iter():\r\n\t\tif process_name in process.name():\r\n\t\t\treturn process.pid\r\n\traise ProcessLookupError(\"process '\" + process_name + \"' not found.\")", "def get_process_name(self):\n\n return self._args.t", "def is_windows():\r\n return sys.platform == \"win32\"", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def is_windows():\n return sys.platform == \"win32\"", "def windows_name(self):\n return self._windows_name", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def _on_windows() -> bool:\n return os.name == \"nt\"", "def get_process_cmdline(process_name):\n\n\tfor pretendant in execute(['ps', '-U', 'root', '-u', 'root', '-o', 'args='])[0].split(\n\t\t\t\"\\n\")[:-1]:\n\t\t#print pretendant\n\t\tif pretendant.find(process_name) != -1:\n\t\t\treturn pretendant.split(' ')" ]
[ "0.6427718", "0.627324", "0.6176712", "0.6168791", "0.61007553", "0.6070687", "0.60217047", "0.60116947", "0.5992538", "0.5970814", "0.59083015", "0.5876524", "0.58620656", "0.5821407", "0.5805197", "0.57778066", "0.5776582", "0.5765953", "0.57581586", "0.57557404", "0.57374895", "0.5732777", "0.5712636", "0.5705129", "0.57000494", "0.5695862", "0.56845045", "0.5672988", "0.56560445", "0.5639912" ]
0.71424156
0
Logs Windows memory stats.
def logMemoryStats():
    class MemoryStatusEx(ctypes.Structure):
        """ MEMORYSTATUSEX """
        kaFields = [
            ( 'dwLength', ctypes.c_ulong ),
            ( 'dwMemoryLoad', ctypes.c_ulong ),
            ( 'ullTotalPhys', ctypes.c_ulonglong ),
            ( 'ullAvailPhys', ctypes.c_ulonglong ),
            ( 'ullTotalPageFile', ctypes.c_ulonglong ),
            ( 'ullAvailPageFile', ctypes.c_ulonglong ),
            ( 'ullTotalVirtual', ctypes.c_ulonglong ),
            ( 'ullAvailVirtual', ctypes.c_ulonglong ),
            ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),
        ];
        _fields_ = kaFields; # pylint: disable=invalid-name

        def __init__(self):
            super(MemoryStatusEx, self).__init__();
            self.dwLength = ctypes.sizeof(self);

    try:
        oStats = MemoryStatusEx();
        ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));
    except:
        reporter.logXcpt();
        return False;

    reporter.log('Memory statistics:');
    for sField, _ in MemoryStatusEx.kaFields:
        reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));
    return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def memory_snapshot(tag, rank):\n GB = 1024 * 1024 * 1024\n MB = 1024 * 1024\n KB = 1024\n\n peak = dgl.partition.get_peak_mem() * KB\n mem = psutil.virtual_memory()\n avail = mem.available / MB\n used = mem.used / MB\n total = mem.total / MB\n\n mem_string = f\"{total:.0f} (MB) total, {peak:.0f} (MB) peak, {used:.0f} (MB) used, {avail:.0f} (MB) avail\"\n logging.debug(f\"[Rank: {rank} MEMORY_SNAPSHOT] {mem_string} - {tag}\")", "def show_process_memory( cls, call_msg = \"\", log_level = None, print_it = False ):\n process = psutil.Process(os.getpid()) # import psutil\n mem = process.memory_info().rss\n # convert to mega and format\n mem_mega = mem/( 1e6 )\n msg = f\"{call_msg}process memory = {mem_mega:10,.2f} mega bytes \"\n if print_it:\n print( msg )\n if not ( log_level is None ):\n cls.__logger.log( log_level, msg )\n msg = f\"{mem_mega:10,.2f} mega bytes \"\n return ( mem, msg )", "def print_memory_diags(disable_print=False):\n process = psutil.Process(os.getpid())\n memory = process.memory_info().rss/1000000000.0\n if not disable_print:\n logging.info('\\tMemory usage: {:.3f} GB'.format(memory))\n return memory", "def _mem_report(tensors: Iterable, mem_type: str) -> None:\n print(f\"Storage on {mem_type}\")\n print(\"-\" * LEN)\n total_numel = 0\n total_mem = 0\n visited_data: List[Any] = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n if print_all:\n print(f\"{element_type}\\t\\t{size}\\t\\t{mem}\")\n print(\"-\" * LEN)\n print(f\"Total Tensors: {total_numel} \\tUsed Memory Space: {total_mem}\")\n print(\"-\" * LEN)", "def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def record_memory_map(self):\n memory_map = self.get_memory_map()\n self._memory_map_records.append(memory_map)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. 
Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def show_mem_usage():\n gl = sys._getframe(1).f_globals\n vars = {}\n for k, v in list(gl.items()):\n # for pandas dataframes\n if hasattr(v, 'memory_usage'):\n mem = v.memory_usage(deep=True)\n if not np.isscalar(mem):\n mem = mem.sum()\n vars.setdefault(id(v), [mem]).append(k)\n # work around for a bug\n elif isinstance(v, pd.Panel):\n v = v.values\n vars.setdefault(id(v), [sys.getsizeof(v)]).append(k)\n total = 0\n for k, (value, *names) in vars.items():\n if value > 1e6:\n print(names, \"%.3fMB\" % (value / 1e6))\n total += value\n print(\"%.3fMB\" % (total / 1e6))", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def get_memory_info():\n return psutil.virtual_memory()", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def get_memory_usage(cls):\n\n mem_stats = psutil.virtual_memory()\n\n mem_stats_dict = { StatsKeys.MEMORY :\n {\n StatsKeys.TOTAL : mem_stats.total,\n StatsKeys.AVAILABLE : mem_stats.available,\n StatsKeys.USED : mem_stats.used\n }\n }\n logger.debug(\"Memory stats: {}\".format(mem_stats_dict))\n\n return mem_stats_dict", "def _api_memory_info() -> Dict[str, Any]:\n process = psutil.Process(os.getpid())\n return {k: size(v) for k, v in process.memory_info()._asdict().items()}", "def memory():\n\twith open('/proc/meminfo','r') as mem:\n\t\tret = {}\n\t\ttmp = 0\n\t\tfor i in mem:\n\t\t\tsline = i.split()\n\t\t\tif str(sline[0])=='MemTotal:':\n\t\t\t\tret['total'] = int(sline[1]*1.0e-6)\n\treturn ret", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def get_memory() -> dict:\n import os\n\n import psutil\n\n proc = psutil.Process(os.getpid())\n return proc.memory_info()", "def get_mem_info():\n import psutil\n vm = psutil.virtual_memory()\n return {\n \"memtotal\": vm.total,\n \"memavailable\": vm.available,\n }", "def memory():\n sin = psutil.virtual_memory()\n return round((sin.total / sin.used) / 100, 3)", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 
1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def dumpMemory():\n libxml2mod.xmlDumpMemory()", "def get_memory(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Memory Usage Statistics\",\n \"/statistics/systems/memory.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)" ]
[ "0.70418596", "0.6867534", "0.643289", "0.6419121", "0.6389766", "0.6379068", "0.59949344", "0.5943068", "0.5942947", "0.5925752", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.59162056", "0.58963734", "0.5882967", "0.5870274", "0.5868042", "0.58618057", "0.58380634", "0.5837955", "0.5830046", "0.5812539", "0.5805553", "0.5799975", "0.578249" ]
0.798583
0
Calls HeapValidate(GetProcessHeap(), 0, NULL);
def checkProcessHeap():
    # Get the process heap.
    try:
        hHeap = ctypes.windll.kernel32.GetProcessHeap();
    except:
        reporter.logXcpt();
        return False;

    # Check it.
    try:
        fIsOkay = ctypes.windll.kernel32.HeapValidate(hHeap, 0, None);
    except:
        reporter.logXcpt();
        return False;

    if fIsOkay == 0:
        reporter.log('HeapValidate failed!');

        # Try trigger a dump using c:\utils\procdump64.exe.
        from common import utils;

        iPid = os.getpid();
        asArgs = [ 'e:\\utils\\procdump64.exe', '-ma', '%s' % (iPid,), 'c:\\CrashDumps\\python.exe-%u-heap.dmp' % (iPid,)];
        if utils.getHostArch() != 'amd64':
            asArgs[0] = 'c:\\utils\\procdump.exe'
        reporter.log('Trying to dump this process using: %s' % (asArgs,));
        utils.processCall(asArgs);

        # Generate a crash exception.
        ctypes.windll.msvcrt.strcpy(None, None, 1024);

    return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")", "def test_func_heap(self):\n cmd = \"deref $_heap()\"\n target = _target(\"heap\")\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)\n\n cmd = \"deref $_heap(0x10+0x10)\"\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)", "def test_validate_factorial_heap_pq(self):\n from ch04.factorial_heap import PQ, validate\n\n end = 10000\n pq = PQ(end)\n for i in range(end):\n pq.enqueue(i, i)\n validate(pq)\n\n last = end-1\n while pq:\n self.assertEqual(last, pq.dequeue())\n last -= 1\n validate(pq)", "def _checkAvailableMemory():\n #execute free -m to get output in MB\n logging.debug(\"checking total memory\")\n cmd = [\n basedefs.EXEC_FREE, \"-m\"\n ]\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_FREE_MEM)\n\n #itterate over output and look for the line: \"Mem: 1 something\"\n #and extract 1 from it (1 is an example to the free memory)\n availableMemory = 0\n for line in output.split(\"\\n\"):\n result = re.match(\"Mem:\\s+(\\d+)\\s+.+\", line)\n if result:\n logging.debug(\"Found a match, amount of memory: %s\" % result.group(1))\n availableMemory = result.group(1)\n\n #compare found memory to restrictions\n availableMemory = int(availableMemory)\n #multiplying CONST_MIN_MEMORY by 0.95 to have tolerance of 5%\n if availableMemory < (basedefs.CONST_MIN_MEMORY_MB * 0.95):\n logging.error(\"Availble memory (%s) is lower then the minimum requirments (%s)\" % (availableMemory, basedefs.CONST_MIN_MEMORY_MB))\n raise Exception(output_messages.ERR_EXP_NOT_EMOUGH_MEMORY)\n\n if availableMemory < basedefs.CONST_WARN_MEMORY_MB:\n logging.warn(\"There is less then %s available memory \" % basedefs.CONST_WARN_MEMORY_MB)\n controller.MESSAGES.append(output_messages.WARN_LOW_MEMORY)", "def test_pop(self):\n self.assertRaises(EmptyHeapException, self.minheap.pop)\n self.minheap.heap = [0, 1, 4, 7, 9]\n assert self.minheap.pop() == 1\n assert self.minheap.heap == [0, 4, 9, 7]", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def test_static_is_heap(self):\n good = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n bad = [1,2,3,114,5,6,7,8,9,10]\n\n self.assertTrue(Heap.is_heap(good), 'should hold the heap property')\n self.assertFalse(Heap.is_heap(bad), 'should not hold the heap property')", "def CalcNewErrorMeasures(self):\n for p in self.Active[:self.N_Active]:\n if self.Errors[p] < 0.0:\n #print self.CalcErrorMeasure(p), p\n self.Errors[p] = self.CalcErrorMeasure(p)\n # Add new values to the heap\n self.Active[:self.heap_length+1],dummy= maxheap.heap_insert(self.Errors[:self.N_Idx], \n p, self.Active[:self.heap_length+1],\n self.heap_length)\n self.heap_length +=1\n \n if self.heap_length != self.N_Active:\n raise ValueError", "def hxlvalidate():\n run_script(hxlvalidate_main)", "def __validate():\n # TODO: implement", 
"def minHeap(self):\n for pos in range(self.size // 2, 0, -1):\n self.minHeapify(pos)", "def CleanUp(self):\n if self.process != 0 and self.mem_address != 0:\n # free up the memory we allocated\n #win32api.SetLastError(0)\n self.CheckGuardSignature()\n\n ret = win32functions.VirtualFreeEx(\n c_void_p(self.process),\n c_void_p(self.mem_address),\n win32structures.ULONG_PTR(0),\n wintypes.DWORD(win32defines.MEM_RELEASE))\n if ret == 0:\n print('Error: CleanUp: VirtualFreeEx() returned zero for address ', hex(self.mem_address))\n last_error = win32api.GetLastError()\n print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())\n sys.stdout.flush()\n self._CloseHandle()\n raise WinError()\n self.mem_address = 0\n self._CloseHandle()\n else:\n pass # ActionLogger().log('\\nWARNING: Cannot call VirtualFreeEx! process_id == 0.')", "def heapleak():\n for i in range(16):\n evl('{}'.format(i))\n\n # Trigger heap info leak\n evl('h=0+0')\n return readintvar('h') & 0xfffffffffffff000", "def validate(self):\n errors = []\n app = errors.append\n\n if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:\n app(\"self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied\")\n\n if self.omp_threads > self.hw.cores_per_node:\n app(\"omp_threads > hw.cores_per_node\")\n\n if self.mem_per_proc > self.hw.mem_per_node:\n app(\"mem_mb >= self.hw.mem_per_node\")\n\n if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:\n app(\"self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied\")\n\n if self.priority <= 0:\n app(\"priority must be > 0\")\n\n if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):\n app(\"1 <= min_cores <= hardware num_cores >= hint_cores not satisfied\")\n\n if errors:\n raise self.Error(str(self) + \"\\n\".join(errors))", "def bad_cgroup_processes_check():\n return CGCheck([], bad_cgroup_processes)", "def sanity_check(self, test_vec_handle):\n self.vec_space.sanity_check(test_vec_handle)", "def __init__(self, heap_used=None, heap_committed=None, heap_max=None, non_heap_used=None, non_heap_committed=None, non_heap_max=None, direct_count=None, direct_used=None, direct_max=None, mapped_count=None, mapped_used=None, mapped_max=None, memory_segments_available=None, memory_segments_total=None, garbage_collectors=None): # noqa: E501 # noqa: E501\n self._heap_used = None\n self._heap_committed = None\n self._heap_max = None\n self._non_heap_used = None\n self._non_heap_committed = None\n self._non_heap_max = None\n self._direct_count = None\n self._direct_used = None\n self._direct_max = None\n self._mapped_count = None\n self._mapped_used = None\n self._mapped_max = None\n self._memory_segments_available = None\n self._memory_segments_total = None\n self._garbage_collectors = None\n self.discriminator = None\n if heap_used is not None:\n self.heap_used = heap_used\n if heap_committed is not None:\n self.heap_committed = heap_committed\n if heap_max is not None:\n self.heap_max = heap_max\n if non_heap_used is not None:\n self.non_heap_used = non_heap_used\n if non_heap_committed is not None:\n self.non_heap_committed = non_heap_committed\n if non_heap_max is not None:\n self.non_heap_max = non_heap_max\n if direct_count is not None:\n self.direct_count = direct_count\n if direct_used is not None:\n self.direct_used = direct_used\n if direct_max is not None:\n self.direct_max = direct_max\n if mapped_count is not None:\n self.mapped_count = mapped_count\n if mapped_used is not None:\n 
self.mapped_used = mapped_used\n if mapped_max is not None:\n self.mapped_max = mapped_max\n if memory_segments_available is not None:\n self.memory_segments_available = memory_segments_available\n if memory_segments_total is not None:\n self.memory_segments_total = memory_segments_total\n if garbage_collectors is not None:\n self.garbage_collectors = garbage_collectors", "def is_in_heap(self, address):\n return self.is_address_of_type(address, MemoryType.MajorHeap, MemoryType.MinorHeap)", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def __init__(self):\r\n self.maxHeap = []\r\n self.minHeap = []", "def __init__(self):\n self.max_heap = MaxHeap()\n self.min_heap = MinHeap()", "def test_free_space_rejects_file_arguments():\n result = _run_metric('free_space', '/etc/hosts')\n # 2 is the exit code for a UsageError, which includes bad parameters.\n assert result.exit_code == 2\n # Is this too fragile?\n assert 'Invalid value' in result.output", "def modifyHeapSizeProperties(self):\n pass", "def validate():", "def _sanity_check_m2ee_stats(m2ee_stats):\n for memory_type, memory_value in m2ee_stats[\"memory\"].items():\n if not isinstance(memory_value, int):\n # Memorypools are here and are stored as a dict\n continue\n\n if memory_value < 0:\n # memory value can be zero, but not negative\n logging.error(\n \"Memory stats with non-logical values: %s\",\n m2ee_stats[\"memory\"],\n )\n raise RuntimeError(\n \"Memory statistics have non-logical values. This will \"\n \"cause incorrect data in your application's metrics. \"\n \"Please contact support!\"\n )", "def validate(self, tracked_pids, test_case=stubTestcase, debug=False):\n \n out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)\n dmesg_lines = out.readlines()\n out.close()\n \n allocations = []\n memory_allocated = False\n \n if debug:\n f = open('mm_debug.txt', 'w+')\n f.write('All KMALLOC/KFREE messages:\\n\\n')\n f.write(''.join(dmesg_lines))\n f.write('\\nTracked pids: %s\\nOnly relevant KMALLOC/KFREE messages:\\n' % repr(tracked_pids))\n \n for line in dmesg_lines:\n re_result = re.search(r'.*?(KMALLOC|KFREE) (\\d*) (\\w*)', line)\n if not re_result:\n continue\n \n action = re_result.group(1)\n pid = int(re_result.group(2))\n address = re_result.group(3)\n \n if pid not in tracked_pids:\n continue\n \n f.write(line)\n\n f.write('\\nProcessing KMALLOC/KFREE messages:\\n')\n \n try:\n for line in dmesg_lines:\n re_result = re.search(r'.*?(KMALLOC|KFREE) (\\d*) (\\w*)', line)\n if not re_result:\n continue\n \n action = re_result.group(1)\n pid = int(re_result.group(2))\n address = re_result.group(3)\n \n if pid not in tracked_pids:\n continue\n \n if debug:\n f.write(line)\n \n if action == 'KMALLOC':\n memory_allocated = True\n if address in allocations:\n test_case.fail('Same address, %s, allocated twice without release.' % address)\n break\n allocations.append(address)\n \n if action == 'KFREE':\n if address not in allocations:\n test_case.fail('Freeing a non allocated address, %s.' 
% address)\n break\n allocations.remove(address)\n else:\n test_case.assert_(memory_allocated, 'No memory allocated during execution.') \n test_case.assert_(not allocations, 'Failed to free some of the allocated memory, left %d:\\n%s' % (len(allocations), '\\n'.join(allocations)))\n finally:\n if debug:\n f.close()", "def test_prevent_wrong_memory(self):\n self.assertRaises(cinv.host.Error, self.wrong_memory)", "def checkmem(self,file_,line_): # 3\n res = self.__obj.checkmemtask(file_,line_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __init__(self):\n self.heapList = [0]\n self.currentSize = 0" ]
[ "0.5956563", "0.5820966", "0.5391256", "0.5279991", "0.52643716", "0.52414745", "0.5221388", "0.51908994", "0.5157347", "0.51529896", "0.5116927", "0.50405", "0.50393564", "0.5023326", "0.5012351", "0.5010514", "0.5008411", "0.50031483", "0.4997377", "0.49946463", "0.49777916", "0.4977714", "0.49690604", "0.4964627", "0.49629122", "0.4962077", "0.49487498", "0.49274477", "0.49074703", "0.49005723" ]
0.76484793
0
Runs the component. The Annual Total Savings, Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be calculated. There must be a known Heat Recovery project for this component to run.
def run (self, scalers = {'capital costs':1.0}):
    self.was_run = True
    self.reason = "OK"

    tag = self.cd['file id'].split('+')
    if len(tag) > 1 and tag[1] != 'transmission':
        self.was_run = False
        self.reason = "Not a transmission project."
        return

    if not self.cd["model electricity"]:
        self.was_run = False
        self.reason = "Electricity must be modeled to analyze "+\
            "transmission. It was not for this community."
        return

    if np.isnan(float(self.comp_specs['distance to community'])):
        self.was_run = False
        self.reason = ("There are no communities within 30 miles with"
                       " lower cost of electricity.")
        return

    self.calc_average_load()
    try:
        self.get_intertie_values()
    except ValueError:
        self.was_run = False
        self.reason = ("Could not find data on community to intertie to.")
        return
    self.calc_pre_intertie_generation()
    self.calc_intertie_offset_generation()

    if self.cd["model heating fuel"]:
        # change these below
        self.calc_lost_heat_recovery()
        # see NOTE*
        #~ return

    if self.cd["model financial"]:
        # AnnualSavings functions (don't need to write)
        self.get_diesel_prices()

        # change these below
        self.calc_capital_costs()
        self.calc_annual_electric_savings()
        self.calc_annual_heating_savings()

        # AnnualSavings functions (don't need to write)
        self.calc_annual_total_savings()
        self.calc_annual_costs(self.cd['interest rate'],
                               scalers['capital costs'])
        self.calc_annual_net_benefit()
        self.calc_npv(self.cd['discount rate'], self.cd["current year"])
        #~ print self.benefit_cost_ratio
        self.calc_levelized_costs(self.proposed_generation_cost)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run (self, scalers = {'capital costs':1.0}):\n\n self.was_run = True\n self.reason = \"OK\"\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'wind':\n self.was_run = False\n self.reason = \"Not a Wind project\"\n return\n\n try:\n #~ self.generation = self.forecast.get_generation(self.start_year)\n self.calc_average_load()\n self.calc_generation_wind_proposed()\n except AttributeError:\n self.diagnostics.add_warning(self.component_name,\n \"could not be run\")\n self.was_run = False\n self.reason = (\"Could not Calculate average load or \"\n \"proposed generation\")\n return\n\n\n\n\n #~ #~ print self.comp_specs['wind class']\n # ??? some kind of failure message\n if self.average_load is None or \\\n (self.average_load > self.comp_specs['average load limit'] and \\\n self.load_offset_proposed > 0):\n #~ float(self.comp_specs['wind class']) > \\\n #~ self.comp_specs['minimum wind class'] and \\\n\n # if the average load is greater that the lower limit run this component\n # else skip\n\n self.calc_transmission_losses()\n self.calc_excess_energy()\n self.calc_net_generation_wind()\n self.calc_electric_diesel_reduction()\n self.calc_diesel_equiv_captured()\n self.calc_loss_heat_recovery()\n self.calc_reduction_diesel_used()\n\n\n if self.cd[\"model financial\"]:\n # AnnualSavings functions (don't need to write)\n self.get_diesel_prices()\n\n # change these below\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n # AnnualSavings functions (don't need to write)\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n #~ print self.benefit_cost_ratio\n self.calc_levelized_costs(self.maintenance_cost)\n else:\n #~ print \"wind project not feasible\"\n self.was_run = False\n if self.load_offset_proposed <= 0:\n self.reason = \"Proposed load offset less than 0\"\n else:\n self.reason = \\\n \"Average load too small for viable wind generation.\"\n self.diagnostics.add_note(self.component_name,\n \"communities average load is not large enough to consider project\")\n #~ print self.benefit_cost_ratio", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 
'self.annual_electric_savings',self.annual_electric_savings", "def run (self, scalers = {'capital costs':1.0}):\n self.was_run = True\n self.reason = \"OK\"\n\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'biomass_pellet':\n self.was_run = False\n self.reason = (\"Not a biomass pellet project.\")\n return\n\n if not self.cd[\"on road system\"]:\n self.diagnostics.add_warning(self.component_name,\n \"not on road system\")\n self.max_boiler_output = 0\n self.heat_displaced_sqft = 0\n self.biomass_fuel_consumed = 0\n self.fuel_price_per_unit = 0\n self.heat_diesel_displaced = 0\n self.reason = \\\n \"Not on road or marine highway system, so it is assumed that\" +\\\n \" pellets cannot be delivered cost effectively.\"\n return\n\n if np.isnan(float(self.comp_specs['peak month % of total'])):\n self.diagnostics.add_warning(self.component_name,\n \"bad config value for 'peak month % of total'\")\n self.max_boiler_output = 0\n self.heat_displaced_sqft = 0\n self.biomass_fuel_consumed = 0\n self.fuel_price_per_unit = 0\n self.heat_diesel_displaced = 0\n self.reason = \"bad config value for 'peak month % of total'\"\n return\n\n if self.cd[\"model heating fuel\"]:\n self.calc_heat_displaced_sqft()\n self.calc_energy_output()\n efficiency = self.comp_specs[\"pellet efficiency\"]\n self.calc_max_boiler_output(efficiency)\n factor = self.comp_specs['capacity factor']\n self.calc_biomass_fuel_consumed(factor)\n self.calc_diesel_displaced()\n\n\n if self.cd[\"model financial\"]:\n self.get_diesel_prices()\n\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n\n\n self.fuel_price_per_unit = self.cd['pellet price']\n\n self.calc_proposed_biomass_cost(self.fuel_price_per_unit)\n self.calc_displaced_heating_oil_price()\n\n\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n\n fuel_cost = self.biomass_fuel_consumed * self.fuel_price_per_unit\n self.calc_levelized_costs(self.maintenance_cost + fuel_cost)", "def run_module(self):\n try:\n if self.in_distributed_mode:\n self.output_dict['insurance_usd'] = 0\n self.output_dict['construction_permitting_usd'] = 0\n self.output_dict['project_management_usd'] = 0\n self.output_dict['bonding_usd'] = 0\n self.output_dict['markup_contingency_usd'] = 0\n self.output_dict['engineering_usd'] = 0\n self.output_dict['site_facility_usd'] = 0\n self.output_dict['total_management_cost'] = self.input_dict['override_total_management_cost']\n\n else:\n self.output_dict['insurance_usd'] = self.insurance()\n self.output_dict['construction_permitting_usd'] = self.construction_permitting()\n self.output_dict['project_management_usd'] = self.project_management()\n self.output_dict['bonding_usd'] = self.bonding()\n self.output_dict['markup_contingency_usd'] = self.markup_contingency()\n self.output_dict['engineering_usd'] = self.engineering_foundations_collection_sys()\n self.output_dict['site_facility_usd'] = self.site_facility()\n self.output_dict['total_management_cost'] = self.total_management_cost()\n self.output_dict['management_cost_csv'] = self.outputs_for_detailed_tab()\n self.output_dict['mangement_module_type_operation'] = self.outputs_for_module_type_operation()\n return 0, 0 # module ran successfully\n except Exception as error:\n traceback.print_exc()\n print(f\"Fail {self.project_name} 
ManagementCost\")\n return 1, error # module did not run successfully", "def do_work(self) -> None:\n self._get_btc_eur_15min()\n print(\n f\"1 BTC = {self.btc_eur_15min} EUR\"\n f\"\\t\\t(15min delayed market price)\"\n )\n\n self._get_eur_gbp_last_month()\n print(\n f\"1 EUR = {self.eur_gbp_last_month} GBP\"\n f\"\\t(last month average rate)\"\n )\n\n self._get_btc_gbp_15min()\n print(\n f\"1 BTC = {self.btc_gbp_15min:.6f} GBP\"\n f\"\\t(BTC 15min delayed market price; GBP latest daily average rate)\"\n )", "def compute(self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber() / 100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n \r\n #Set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n\r\n #Compute and apend the results for each year\r\n totalInterest = 0.0\r\n for year in range(1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n startBalance = endBalance\r\n totalInterest += interest\r\n\r\n #Append the totals for the period\r\n result += \"Ending balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n\r\n #Output the results while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def compute (self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber()/100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n #set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n #Compute and append the results for each year\r\n totalInterest = 0.0\r\n for year in range (1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n #the ending balance for year 1 wil lbe the starting balance for year 2 and so on\r\n startBalance = endBalance\r\n totalInterest += interest\r\n #Append the totals for the entire period - final output for the whole thing\r\n result += \"Ending Balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n #Output the result while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. 
Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. 
+ s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet2 as Generate\n \n ######################### Set General Parameters ##############################\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n \n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, P_Product, Daily = 'y') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_LAI = Start.Download_Data.LAI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, LAI_Product) \n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Daily = os.path.join(Data_Path_P, 'Daily')\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create Rainy Days based on daily CHIRPS\n Data_Path_RD = Two.Rainy_Days.Calc_Rainy_Days(Dir_Basin, Data_Path_P_Daily, Startdate, Enddate)\n\n # Create monthly LAI\n Dir_path_LAI = os.path.join(Dir_Basin, Data_Path_LAI)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_LAI, Startdate, Enddate)\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n \n # Create monthly GPP \n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land 
Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 2)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_Prec\n\n #_______________________________Evaporation________________________________\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n #_______________________________Rainy Days_________________________________\n\n # Define info for the nc files\n info = ['monthly','days', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_RD = DC.Create_NC_name('RD', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_RD):\n\n # Get the data of Evaporation and save as nc\n DataCube_RD = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_RD, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_RD, DataCube_RD, 'RD', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_RD\n\n #_______________________________Leaf Area Index____________________________\n\n # Define info for the nc files\n info = ['monthly','m2-m-2', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_LAI = DC.Create_NC_name('LAI', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_LAI):\n\n # Get the data of Evaporation and save as nc\n DataCube_LAI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_LAI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_LAI, DataCube_LAI, 'LAI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_LAI\n\n ####################### 
Calculations Sheet 2 ##############################\n \n DataCube_I, DataCube_T, DataCube_E = Two.SplitET.ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM, Name_NC_LU, Startdate, Enddate, Simulation)\n \n ############################ Create CSV 2 ################################# \n\n Dir_Basin_CSV = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU, DataCube_I, DataCube_T, DataCube_E, Example_dataset)\n\n ############################ Create Sheet 2 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV)\n\n return()", "def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def execute(self):\n \n # initialize input parameters\n self.hubHt = self.hub_height\n self.ratedPower = 
self.machine_rating\n self.maxTipSpd = self.max_tip_speed\n self.rotorDiam = self.rotor_diameter\n self.maxCp = self.max_power_coefficient\n self.maxTipSpdRatio = self.opt_tsr\n self.cutInWS = self.cut_in_wind_speed\n self.cutOutWS = self.cut_out_wind_speed\n self.altitude = self.altitude\n\n if self.air_density == 0.0: \n # Compute air density \n ssl_pa = 101300 # std sea-level pressure in Pa\n gas_const = 287.15 # gas constant for air in J/kg/K\n gravity = 9.80665 # standard gravity in m/sec/sec\n lapse_rate = 0.0065 # temp lapse rate in K/m\n ssl_temp = 288.15 # std sea-level temp in K\n \n air_density = (ssl_pa * (1-((lapse_rate*(self.altitude + self.hubHt))/ssl_temp))**(gravity/(lapse_rate*gas_const))) / \\\n (gas_const*(ssl_temp-lapse_rate*(self.altitude + self.hubHt)))\n else:\n \t\tair_density = self.air_density\n\n # determine power curve inputs\n self.reg2pt5slope = 0.05\n \n #self.max_efficiency = self.drivetrain.getMaxEfficiency()\n self.ratedHubPower = self.ratedPower / self.max_efficiency # RatedHubPower\n\n self.omegaM = self.maxTipSpd/(self.rotorDiam/2.) # Omega M - rated rotor speed\n omega0 = self.omegaM/(1+self.reg2pt5slope) # Omega 0 - rotor speed at which region 2 hits zero torque\n Tm = self.ratedHubPower*1000/self.omegaM # Tm - rated torque\n\n # compute rated rotor speed\n self.ratedRPM = (30./pi) * self.omegaM\n \n # compute variable-speed torque constant k\n kTorque = (air_density*pi*self.rotorDiam**5*self.maxCp)/(64*self.maxTipSpdRatio**3) # k\n \n b = -Tm/(self.omegaM-omega0) # b - quadratic formula values to determine omegaT\n c = (Tm*omega0)/(self.omegaM-omega0) # c\n \n # omegaT is rotor speed at which regions 2 and 2.5 intersect\n # add check for feasibility of omegaT calculation 09/20/2012\n omegaTflag = True\n if (b**2-4*kTorque*c) > 0:\n omegaT = -(b/(2*kTorque))-(np.sqrt(b**2-4*kTorque*c)/(2*kTorque)) # Omega T\n #print [kTorque, b, c, omegaT]\n \n windOmegaT = (omegaT*self.rotorDiam)/(2*self.maxTipSpdRatio) # Wind at omegaT (M25)\n pwrOmegaT = kTorque*omegaT**3/1000 # Power at ometaT (M26)\n\n else:\n omegaTflag = False\n windOmegaT = self.ratedRPM\n pwrOmegaT = self.ratedPower\n\n # compute rated wind speed\n d = air_density*np.pi*self.rotorDiam**2.*0.25*self.maxCp\n self.ratedWindSpeed = \\\n 0.33*( (2.*self.ratedHubPower*1000. / ( d))**(1./3.) ) + \\\n 0.67*( (((self.ratedHubPower-pwrOmegaT)*1000.) 
/ (1.5*d*windOmegaT**2.)) + windOmegaT )\n\n # set up for idealized power curve\n n = 161 # number of wind speed bins\n itp = [None] * n\n ws_inc = 0.25 # size of wind speed bins for integrating power curve\n Wind = []\n Wval = 0.0\n Wind.append(Wval)\n for i in xrange(1,n):\n Wval += ws_inc\n Wind.append(Wval)\n\n # determine idealized power curve \n self.idealPowerCurve (Wind, itp, kTorque, windOmegaT, pwrOmegaT, n , omegaTflag)\n\n # add a fix for rated wind speed calculation inaccuracies kld 9/21/2012\n ratedWSflag = False\n # determine power curve after losses\n mtp = [None] * n\n for i in xrange(0,n):\n mtp[i] = itp[i] #* self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower)\n #print [Wind[i],itp[i],self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower),mtp[i]] # for testing\n if (mtp[i] > self.ratedPower):\n if not ratedWSflag:\n ratedWSflag = True\n mtp[i] = self.ratedPower\n\n self.rated_wind_speed = self.ratedWindSpeed\n self.rated_rotor_speed = self.ratedRPM\n self.power_curve = mtp\n self.wind_curve = Wind\n\n # compute turbine load outputs\n self.rotor_torque = self.ratedHubPower/(self.ratedRPM*(pi/30.))*1000.\n self.rotor_thrust = air_density * self.thrust_coefficient * pi * self.rotor_diameter**2 * (self.ratedWindSpeed**2) / 8.", "def main():\n trades = get_trades()\n _print_trades(trades)\n\n print(\"\\n# Cost basis per asset\")\n _cost_basis_per_asset(trades)\n\n for year in range(2015, 2019):\n trades_for_year = _filter_trades_by_time(trades, year)\n _print_balances(trades_for_year, year)\n _print_agg_trades(trades_for_year, year)", "def calculate(self, technologies, value_streams, results, opt_years):\n self.initiate_cost_benefit_analysis(technologies, value_streams)\n super().calculate(self.ders, self.value_streams, results, opt_years)\n self.create_equipment_lifetime_report(self.ders)", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, 
Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del 
DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 
###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in years:\n\n ############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def execute(self, parameters, messages):\n execute_tool(arcsdm.acterbergchengci.Calculate, self, parameters, messages)\n return", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def _compute_(self):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fbgc = \"data/sim/{dn}/{rad}/exp.bgc.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), \n rad=self.rad, bm=self.bmnum)\n fflare = \"data/sim/{dn}/{rad}/exp.flare.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r \\\"UT=[{ut}];rad='{rad}';dic='{dic}';fbgc='{fbgc}';bm={bm};\\\n fflare='{fflare}';rt_1D_sim;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fbgc=fbgc, fflare=fflare)\n os.system(cmd)\n return", "def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not 
self.cd['switchgear suitable for renewables']:\n powerhouse_control_cost = self.cd['switchgear cost']\n\n #~ road_needed = self.comp_specs['road needed for transmission line']\n\n\n if str(self.comp_specs['transmission capital cost'])\\\n != 'UNKNOWN':\n transmission_line_cost = \\\n int(self.comp_specs['transmission capital cost'])\n else:\n if str(self.comp_specs['distance to resource']) \\\n != 'UNKNOWN':\n distance = \\\n float(self.comp_specs\\\n ['distance to resource'])\n transmission_line_cost = \\\n distance*self.comp_specs['est. transmission line cost']\n\n secondary_load_cost = 0\n if self.comp_specs['secondary load']:\n secondary_load_cost = self.comp_specs['secondary load cost']\n\n if str(self.comp_specs['generation capital cost']) \\\n != 'UNKNOWN':\n wind_cost = \\\n int(self.comp_specs['generation capital cost'])\n self.cost_per_kw = np.nan\n else:\n for i in range(len(self.comp_specs['estimated costs'])):\n if int(self.comp_specs['estimated costs'].iloc[i].name) < \\\n self.load_offset_proposed:\n if i == len(self.comp_specs['estimated costs']) - 1:\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n continue\n\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n\n wind_cost = self.load_offset_proposed * cost\n self.cost_per_kw = cost\n\n #~ print powerhouse_control_cost\n #~ print transmission_line_cost\n #~ print secondary_load_cost\n #~ print wind_cost\n self.capital_costs = powerhouse_control_cost + transmission_line_cost +\\\n secondary_load_cost + wind_cost\n\n #~ print 'self.capital_costs',self.capital_costs", "def run_simulation(self):\n env = simpy.Environment()\n env.process(self._simulation(env))\n env.run(until=24 * HORIZON)\n return self.total_cost, self.total_profit, self.number_of_courses", "def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec", "def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 
1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh", "def solve_model(self): \n \n t0 = time.time() #start the clock\n \n # a. benchmark case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(1)\n \n #ii. set policy. in RR08 the benchmark economy has no taxes nor subsidies\n self.tau_benchmark = np.array([0, 0, 0]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(self.tau_benchmark) #set tax system\n \n #iii. benchmark equilibrium\n self.Yss_b, self.Kss_b, self.TFPss_b, self.average_firm_size_b, self.E_star_b, _, \\\n _, self.N_ss_b, self.w_ss_b, self.cdf_stationary_b, self.cdf_emp_b = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Benchmark Stationary Equilibrium\")\n print(\"-----------------------------------------\")\n print(f\"ss output = {self.Yss_b:.2f}\")\n print(f\"ss capital = {self.Kss_b:.2f}\")\n print(f\"ss tfp = {self.TFPss_b:.2f}\")\n print(f\"ss wage = {self.w_ss_b:.2f}\")\n print(f\"entry mass = {self.E_star_b:.3f}\")\n print(f\"avg. firm size = {self.average_firm_size_b:.2f}\")\n \n #b. plot (note that the distributions plotted here are unaffected by the distortionary policies)\n \n if self.plott:\n #i. initialize\n employed = [4.99, 49.99]\n firm_size_by_employee = np.zeros(len(employed)+1)\n share_employment = np.zeros(len(employed)+1)\n \n \n #i. percentage of firms that employ employed\n \n for i_e in range(len(employed)):\n summ = np.sum(firm_size_by_employee)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_stationary_b, employed[i_e])[0] #labor_demand_rel is labor demand with the lowest value normalized to 1\n firm_size_by_employee[i_e] = interpolate - summ\n firm_size_by_employee[-1] = 1 - np.sum(firm_size_by_employee)\n \n plt.pie(firm_size_by_employee, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Size of Firms by Firm Size (Number of Employees)')\n plt.savefig('firm_size_rr08.pdf')\n plt.show()\n \n \n #ii. 
employment percentage by firm size\n for i_e in range(len(employed)):\n summ = np.sum(share_employment)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_emp_b , employed[i_e])[0]\n share_employment[i_e] = interpolate - summ\n share_employment[-1] = 1 - np.sum(share_employment)\n \n plt.pie(share_employment, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Employment Share by Firm Size (Number of Employees)')\n plt.savefig('employment_by_firm_size_rr08.pdf')\n plt.show()\n \n #iii. productivity cdf and employment cdf\n plt.plot(self.grid_s, self.cdf_stationary_b)\n plt.plot(self.grid_s, self.cdf_emp_b)\n plt.title('Stationary CDF' )\n plt.xlabel('Productivity level')\n plt.ylabel('Cumulative Sum')\n plt.legend(['Firms by Productivity Level','Share of Employment'])\n plt.savefig('cdf_rr08.pdf')\n plt.show()\n \n \n \n #c. distortion case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(0)\n \n #ii. compute stationary economy for each tau\n \n for idx, tau in enumerate(self.tau_vector):\n \n #iii. find the subsidy rate that generates the same capital stock as in benchmark economy\n self.tau_s[idx] = self.find_subsidy_rate(tau)\n \n # set tax system with newly found tau_s and given tau\n tauv = np.array([-self.tau_s[idx], self.excempt_frac, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv) #set tax system\n \n #v. distorted stationary equilibrium\n self.Yss_d[idx], self.Kss_d[idx], self.TFPss_d[idx], self.average_firm_size_d[idx], self.E_star_d[idx], \\\n self.Y_set_d[idx,:], self.subsidy_size_d[idx], self.N_ss_d[idx], self.w_ss_d[idx],\\\n _, _ = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Distorted Stationary Equilibrium\")\n print(\"-----------------------------------------\\n\")\n if self.distortion_case == 1:\n print(\"Tax/Subidy Uncorrelated with Firm Level Producitivity\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Negatively Correlated with Firm Level Producitivity\")\n print(\"(low productivity firms recieve subsidy, high productivity taxed)\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Positively Correlated with Firm Level Producitivity\")\n print(\"(high productivity firms recieve subsidy, low productivity taxed)\\n\")\n if self.policy_type == 1 :\n print(\"Tax Type: Tax on output\\n\")\n elif self.policy_type == 2 :\n print(\"Tax Type: Tax on capital\\n\")\n elif self.policy_type == 3 :\n print(\"Tax Type: Tax on labor\\n\")\n print(f\"fraction of firms recieving subsidy = {self.subsidy_frac:.2f}\")\n print(f\"fraction of firms taxed = {1-self.subsidy_frac-self.excempt_frac:.2f}\")\n print(f\"fraction of firms excempt = {self.excempt_frac:.2f}\")\n print(\"-----------------------------------------\\n\")\n \n print(tabulate([['relative Yss', round(self.Yss_d[0]/self.Yss_b, 2), round(self.Yss_d[1]/self.Yss_b, 2), round(self.Yss_d[2]/self.Yss_b, 2), round(self.Yss_d[3]/self.Yss_b, 2)],\n ['relative TFPss', round(self.TFPss_d[0]/self.TFPss_b, 2), round(self.TFPss_d[1]/self.TFPss_b, 2), round(self.TFPss_d[2]/self.TFPss_b, 2), round(self.TFPss_d[3]/self.TFPss_b, 2)], \n ['relative entry mass', round(self.E_star_d[0]/self.E_star_b, 2), round(self.E_star_d[1]/self.E_star_b, 2), round(self.E_star_d[2]/self.E_star_b, 2), round(self.E_star_d[3]/self.E_star_b, 2)],\n ['share of subsidized output', round(self.Y_set_d[0,0], 2), round(self.Y_set_d[1,0], 2), round(self.Y_set_d[2,0], 2), round(self.Y_set_d[3,0], 2)],\n ['total subsidy 
paid of output', round(self.subsidy_size_d[0], 2), round(self.subsidy_size_d[1], 2), round(self.subsidy_size_d[2], 2), round(self.subsidy_size_d[3], 2)],\n ['subsidy rate (tau_s)', round(self.tau_s[0], 2), round(self.tau_s[1], 2), round(self.tau_s[2], 2), round(self.tau_s[3], 2)],\n [], \n ['relative Kss', round(self.Kss_d[0]/self.Kss_b, 2), round(self.Kss_d[1]/self.Kss_b, 2), round(self.Kss_d[2]/self.Kss_b, 2), round(self.Kss_d[3]/self.Kss_b, 2)], \n ['relative wss', round(self.w_ss_d[0]/self.w_ss_b, 2), round(self.w_ss_d[1]/self.w_ss_b, 2), round(self.w_ss_d[2]/self.w_ss_b, 2), round(self.w_ss_d[3]/self.w_ss_b, 2)], \n ['relative Nss', round(self.N_ss_d[0]/self.N_ss_b, 2), round(self.N_ss_d[1]/self.N_ss_b, 2), round(self.N_ss_d[2]/self.N_ss_b, 2), round(self.N_ss_d[3]/self.N_ss_b, 2)], \n ['relative avg. firm size', round(self.average_firm_size_d[0]/self.average_firm_size_b, 2), round(self.average_firm_size_d[1]/self.average_firm_size_b, 2), round(self.average_firm_size_d[2]/self.average_firm_size_b, 2), round(self.average_firm_size_d[3]/self.average_firm_size_b, 2)]],\n headers=['Variable', 'Tax = '+str(self.tau_vector[0]), \"Tax = \"+str(self.tau_vector[1]), 'Tax = '+str(self.tau_vector[2]), 'Tax = '+str(self.tau_vector[3])]))\n \n\n t1 = time.time()\n print(f'\\nTotal Run Time: {t1-t0:.2f} seconds')", "def main():\n \n welcome()\n myBill = get_bill_amt()\n pct = get_tip_pct()\n tip = calc_tip(myBill, pct)\n show_results(myBill, tip, pct)", "def set_costs(self) -> None:\n self[\"glider cost\"] = (\n self[\"glider base mass\"] * self[\"glider cost slope\"]\n + self[\"glider cost intercept\"]\n )\n self[\"lightweighting cost\"] = (\n self[\"glider base mass\"]\n * self[\"lightweighting\"]\n * self[\"glider lightweighting cost per kg\"]\n )\n self[\"electric powertrain cost\"] = (\n self[\"electric powertrain cost per kW\"] * self[\"electric power\"]\n )\n self[\"combustion powertrain cost\"] = (\n self[\"combustion power\"] * self[\"combustion powertrain cost per kW\"]\n )\n self[\"fuel cell cost\"] = self[\"fuel cell power\"] * self[\"fuel cell cost per kW\"]\n self[\"power battery cost\"] = (\n self[\"battery power\"] * self[\"power battery cost per kW\"]\n )\n self[\"energy battery cost\"] = (\n self[\"energy battery cost per kWh\"] * self[\"electric energy stored\"]\n )\n self[\"fuel tank cost\"] = self[\"fuel tank cost per kg\"] * self[\"fuel mass\"]\n # Per km\n self[\"energy cost\"] = self[\"energy cost per kWh\"] * self[\"TtW energy\"] / 3600\n\n # For battery, need to divide cost of electricity\n # at battery by efficiency of charging\n # to get costs at the \"wall socket\".\n\n _ = lambda x: np.where(x == 0, 1, x)\n self[\"energy cost\"] /= _(self[\"battery charge efficiency\"])\n\n self[\"component replacement cost\"] = (\n self[\"energy battery cost\"] * self[\"battery lifetime replacements\"]\n + self[\"fuel cell cost\"] * self[\"fuel cell lifetime replacements\"]\n )\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n to_markup = yaml.safe_load(stream)[\"markup\"]\n\n self[to_markup] *= self[\"markup factor\"]\n\n # calculate costs per km:\n self[\"lifetime\"] = self[\"lifetime kilometers\"] / self[\"kilometers per year\"]\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n purchase_cost_params = yaml.safe_load(stream)[\"purchase\"]\n\n self[\"purchase cost\"] = self[purchase_cost_params].sum(axis=2)\n # per km\n amortisation_factor = self[\"interest rate\"] + (\n self[\"interest rate\"]\n / (\n (np.array(1) + self[\"interest 
rate\"]) ** self[\"lifetime kilometers\"]\n - np.array(1)\n )\n )\n self[\"amortised purchase cost\"] = (\n self[\"purchase cost\"] * amortisation_factor / self[\"kilometers per year\"]\n )\n\n # per km\n self[\"maintenance cost\"] = (\n self[\"maintenance cost per glider cost\"]\n * self[\"glider cost\"]\n / self[\"kilometers per year\"]\n )\n\n # simple assumption that component replacement\n # occurs at half of life.\n self[\"amortised component replacement cost\"] = (\n (\n self[\"component replacement cost\"]\n * (\n (np.array(1) - self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n / 2\n )\n )\n * amortisation_factor\n / self[\"kilometers per year\"]\n )\n\n self[\"total cost per km\"] = (\n self[\"energy cost\"]\n + self[\"amortised purchase cost\"]\n + self[\"maintenance cost\"]\n + self[\"amortised component replacement cost\"]\n )", "def main():\n # Retrieve csv_file name an max_investment from argument passed in console:\n arg_csv_file, arg_max_investment = set_arg()\n if arg_csv_file:\n csv_file = arg_csv_file\n else:\n csv_file = 'dataset1_Python+P7.csv'\n if arg_max_investment:\n max_investment = float(arg_max_investment)\n else:\n max_investment = 500.00\n\n # Retrieve dataset:\n base_dataset = open_convert_and_clean_csv(csv_file)\n\n # Retrieve solution:\n start = time.perf_counter()\n print()\n print(f\"Processing with file '{csv_file}' containing {len(base_dataset)} shares...\")\n print(f\"Maximum investment: {max_investment}€\")\n print(\"Please wait...\")\n computable_dataset = add_roi_to_dataset(convert_dataset_to_cents(base_dataset))\n best_roi, combination = best_combination_dynamic(computable_dataset, max_investment)\n\n # Formatting results:\n combination.sort(key=lambda x: x[2], reverse=True)\n combination_in_euros = convert_dataset_to_euros(combination)\n best_roi /= 100\n # Following calculus is done on cent prices (combination) to avoid approximations with floats\n shares_cost = calculate_shares_cost_sum(combination) / 100\n\n # Printing results:\n print()\n print(f\"Length of dataset: {len(computable_dataset)}\")\n print(f\"Duration of Analysis: {elapsed_time_formatted(start)}\")\n print()\n print(f\"Best Return on investment after 2 years: {round(best_roi, 2)}€\")\n print(f\"Number of shares to buy : {len(combination_in_euros)}\")\n print(f\"Total cost: {round(shares_cost, 2)}€\")\n print()\n print(f\"Best combination of shares ordered by performance: \")\n for share in combination_in_euros:\n print(f\"{share[0]} | Price: {share[1]}€ | profit: {share[2]}%\")\n print()", "def calculate_economics(\n irradiance: pd.DataFrame, temperature: pd.DataFrame, wind_speed: pd.DataFrame,\n CECMod: pd.DataFrame, configuration: float = 1\n ):\n p_out = calculate_dc_output(irradiance, temperature, wind_speed, CECMod=CECMod)\n\n # convert dc to AC - considering a flat loss of 14%\n # we have to improve this in the future\n p_out = [v * 0.86 for v in p_out]\n\n day_count = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n monthly_electricity = []\n\n for month in range(12):\n st_index = sum(day_count[:month + 1]) * 24\n end_index = sum(day_count[:month + 2]) * 24\n data = p_out[st_index: end_index]\n # Note: division by 50 is to match the values - remove it later!\n monthly_electricity.append(sum(data) / len(data) / 50)\n\n total_ac_energy = sum(p_out)\n monthly_ac_energy = pd.DataFrame(\n zip(calendar.month_abbr[1:], monthly_electricity),\n columns=['month', 'Thousand kWh']\n )\n\n # Based on the example here: 
https://nrel-pysam.readthedocs.io/en/master/Import.html\n\n grid = Grid.default(\"PVWattsCommercial\")\n ur = UtilityRate.from_existing(grid, \"PVWattsCommercial\")\n cl = Cashloan.from_existing(grid,\"PVWattsCommercial\")\n\n sam_data = read_sam_data(configuration)\n for module, data in zip([grid, ur, cl], sam_data[:-1]):\n for k, v in data.items():\n if k == 'number_inputs':\n continue\n try:\n module.value(k, v)\n except AttributeError:\n print(module, k, v)\n\n\n grid.SystemOutput.gen = p_out\n\n grid.execute()\n ur.execute()\n cl.execute()\n\n # list possible outputs here\n adjusted_installed_cost = cl.Outputs.adjusted_installed_cost\n payback_cash_flow = [-1 * x for x in cl.Outputs.cf_discounted_payback]\n\n return total_ac_energy, monthly_ac_energy, adjusted_installed_cost, payback_cash_flow", "def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)" ]
[ "0.69873667", "0.64584976", "0.6378536", "0.63241583", "0.63009036", "0.62177217", "0.6204553", "0.6156644", "0.6136159", "0.6083111", "0.6052154", "0.6037782", "0.60245705", "0.60139", "0.5996558", "0.599415", "0.5984706", "0.59628487", "0.59623754", "0.59618145", "0.5958885", "0.59586155", "0.5950948", "0.5950873", "0.5934772", "0.59136856", "0.59029186", "0.58681715", "0.58336", "0.5832624" ]
0.68490684
1
Calculate the Average Diesel load of the current system Attributes
def calc_average_load (self): #~ self.generation = self.forecast.generation_by_type['generation diesel']\ #~ [self.start_year] self.average_load = \ self.forecast.yearly_average_diesel_load.ix[self.start_year]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def totalEffectiveLoad(self):\n return sum(s.effectiveLoad() for s in self.dispatcher.statuses)", "def get_loadavg(cls):\n\n with open(\"/proc/loadavg\") as loadavg:\n loadavg = loadavg.read().split()\n kernel_entities = loadavg[3].split(\"/\")\n loadavg_stat = { StatsKeys.LOADAVG :\n {\n StatsKeys.LAST_1_MIN : float(loadavg[0]),\n StatsKeys.LAST_5_MIN : float(loadavg[1]),\n StatsKeys.LAST_15_MIN : float(loadavg[2]),\n StatsKeys.RUNNABLE_ENTITIES : int(kernel_entities[0]),\n StatsKeys.SCHEDULING_ENTITIES : int(kernel_entities[1])\n }\n }\n logger.debug(\"Loadavg stats: {}\".format(' '.join(loadavg)))\n\n return loadavg_stat", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def average_performance(self):\n\n print(f\"Average performance: {self.performance / 10}\")", "def getloadavg():\n global _loadavg_inititialized\n\n if not _loadavg_inititialized:\n cext.init_loadavg_counter()\n _loadavg_inititialized = True\n\n # Drop to 2 decimal points which is what Linux does\n raw_loads = cext.getloadavg()\n return tuple([round(load, 2) for load in raw_loads])", "def loadavg():\n sin = psutil.getloadavg()\n return [\n round(sin[0], 3),\n round(sin[1], 3),\n round(sin[2], 3)\n ]", "def load_average(self):\n return _favg(self.load_samples)", "def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def _avg_performance(bd_dims, BD_directory, run,archive_file_path,max_performance,conversion_func=None,from_fitfile=False):\n path=get_archive_filepath(BD_directory,run, archive_file_path)\n all_performances=get_all_performances(bd_dims, path, conversion_func,from_fitfile)\n return np.mean(all_performances)/max_performance", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? 
# <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def get_load_factor(self):\n # Your code here\n return self.count/len(self.data)", "def _find_average_age():\r\n count, total = 0, 0\r\n for resource in resources:\r\n patient = resource[\"resource\"]\r\n if \"birthDate\" in patient:\r\n count += 1\r\n dob = patient[\"birthDate\"].split(\"-\")\r\n dob = datetime(int(dob[0]), int(dob[1]), int(dob[2]), 0, 0, 0, 0)\r\n if \"deceasedDateTime\" in patient:\r\n death_time = patient[\"deceasedDateTime\"].split(\"T\")[0].split(\r\n \"-\")\r\n death_time = datetime(int(death_time[0]), int(death_time[1]),\r\n int(death_time[2]), 0, 0, 0, 0)\r\n else:\r\n death_time = datetime.now()\r\n age = relativedelta(death_time, dob).years\r\n total += age\r\n if count == 0:\r\n return count, count\r\n return total / count, count", "def average_level(self):\n spl = [utils.dbspl(x) for x in self.load_files()]\n return np.mean(spl), np.std(spl)", "def get_load_avg():\n \n with open('/proc/loadavg') as f:\n line = f.readline()\n \n return [float(x) for x in line.split()[:3]]", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def ram_average(self):\n return _favg(self.ram_samples)", "def average(self):\n return self.properties.get('average')", "def get_fiber_density_average():\n return Global_Module.global_fiber_density_with_average", "def CountRandomLoadRate(self):\n\t\treturn self._get_attribute('countRandomLoadRate')", "def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()", "def load_list(self):\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.load_avg,())\n return res", "def get_average_age(self):\n return np.mean([agent.age for agent in self.agents])", "def global_efficiency(self, node_list1, node_list2, link_attribute=None):\n local_efficiency = self.local_efficiency(node_list1, node_list2,\n link_attribute)\n return 1/np.mean(local_efficiency)", "def total_experiment_load():\n loads = tempfeeder_exp()\n return total_load_in_experiment_periods(loads, loads.user_ids)", "def DAM(self):\n return self.get_class_average(self.DAM_class_level)", "def average_speed(self):\n return self._average_speed", "def getAvg(self):\r\n\t\treturn self.data['avg']" ]
[ "0.7328244", "0.64413404", "0.6380107", "0.63506234", "0.6312014", "0.6299931", "0.618164", "0.6149521", "0.6113699", "0.608994", "0.6078722", "0.59945375", "0.5977917", "0.5886562", "0.5843336", "0.5759978", "0.5727464", "0.565973", "0.5629968", "0.56079954", "0.5549923", "0.5545292", "0.5543038", "0.5536138", "0.55217427", "0.5501296", "0.5498026", "0.5487671", "0.54786307", "0.5467715" ]
0.7217842
1
Calculate the generation offset by connecting a transmission line to the intertied community.
def calc_intertie_offset_generation (self): self.generation = \ self.forecast.get_generation(self.start_year,self.end_year) dist = self.comp_specs['distance to community'] self.annual_transmission_loss = \ 1 - ( (1- (self.comp_specs['transmission loss per mile']/ 100.0)) ** dist) self.intertie_offset_generation = \ self.generation * (1 + self.annual_transmission_loss) gen_eff = self.intertie_generation_efficiency self.intertie_offset_generation_fuel_used = \ self.intertie_offset_generation / gen_eff #~ print 'self.proposed_generation',self.proposed_generation #~ print con
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_and_set_propagation_distances(self):\n\n self.l_edge = self.calculate_distance_edge()\n self.l_int = self.calculate_distance_interaction()", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def get_propagation_time(self):\n return 0.0 # self.get_distance_to_gateway() / (3 * pow(10,8))", "def calculate_propagation(self):\n pass", "def chain_offset(self):\n return self._chain_offset", "def calc_net_generation_wind (self):\n self.net_generation_wind = self.generation_wind_proposed - \\\n self.transmission_losses -\\\n self.excess_energy\n #~ print 'self.net_generation_wind',self.net_generation_wind", "def relativize_coordinates(self):\n if len(self.nodes) + len(self.connecting) < 1:\n return\n smallest_c = (self.nodes+self.connecting)[0].c\n for node in self.nodes+self.connecting:\n if node.c < smallest_c:\n smallest_c = node.c\n for node in self.nodes+self.connecting:\n node.c = node.c - smallest_c", "def lidar_relative(self):\n return self.distance", "def get_shapeOffset(self):\n try:\n _str_func = ' get_shapeOffset'.format(self)\n log.debug(\"|{0}| >> ... 
[{1}]\".format(_str_func,self)+ '-'*80)\n \n ml_check = self.getBlockParents()\n ml_check.insert(0,self)\n \n for mBlock in ml_check:\n l_attrs = ['controlOffset','skinOffset']\n for a in l_attrs:\n if mBlock.hasAttr(a):\n v = mBlock.getMayaAttr(a)\n log.debug(\"|{0}| >> {1} attr found on rigBlock: {2} | {3}\".format(_str_func,a,v,mBlock.mNode)) \n return v \n return 1\n except Exception,err:cgmGEN.cgmExceptCB(Exception,err,msg=vars())", "def get_alignment_offset(self):\n\n return 0", "def position_of_transmission(self, transmission):\n if transmission <= 0:\n transmission = 1e-6\n OD = -log10(transmission)\n\n if OD >= self.OD_max:\n return self.motor_max\n if OD <= self.OD_min:\n return self.motor_min\n\n p_min = self.motor_range[0]\n p_max = self.motor_range[1]\n angle = p_min + (OD - self.OD_range[0]) / self.OD_range[1] * (p_max - p_min)\n # Assume the transmission is flat outside the angular range\n if angle < min(p_min, p_max):\n angle = min(p_min, p_max)\n if angle > max(p_min, p_max):\n angle = max(p_min, p_max)\n return angle", "def _position_to_offset(self, position: Position) -> int:\n return self._line_offsets[position.line] + position.character", "def offset_graph():\n pylon_graph = graph.graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def assign_lengths(G):\r\n for u, v, d in G.edges(data=True):\r\n posA = nx.get_node_attributes(G, 'pos')[u]\r\n posB = nx.get_node_attributes(G, 'pos')[v]\r\n\r\n dist = np.linalg.norm(np.subtract(posA, posB))\r\n d['distance'] = dist\r\n return G", "def relative_rate(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_relative_rate(self)", "def calculate_distance_line(\n r_packet, comov_nu, is_last_line, nu_line, time_explosion\n):\n\n nu = r_packet.nu\n\n if is_last_line:\n return MISS_DISTANCE\n\n nu_diff = comov_nu - nu_line\n\n # for numerical reasons, if line is too close, we set the distance to 0.\n if r_packet.is_close_line:\n nu_diff = 0.0\n r_packet.is_close_line = False\n\n if nu_diff >= 0:\n distance = (nu_diff / nu) * C_SPEED_OF_LIGHT * time_explosion\n else:\n print(\"WARNING: nu difference is less than 0.0\")\n raise MonteCarloException(\n \"nu difference is less than 0.0; for more\"\n \" information, see print statement beforehand\"\n )\n\n if numba_config.ENABLE_FULL_RELATIVITY:\n return calculate_distance_line_full_relativity(\n nu_line, nu, time_explosion, r_packet\n )\n return distance", "def pad_instance(line):\n \n # split the line and extract attributes\n attributes = line.split(\",\")\n seq = attributes[0].strip()\n inc = int(attributes[1])\n out = int(attributes[2])\n lifetime = float(attributes[3])\n classify = attributes[4]\n inc_50 = int(attributes[5])\n out_50 = int(attributes[6])\n\n # how many cells were sent/received before any padding\n initial_num_cells = inc + out\n\n # the ratio of outgoing cells to incoming cells\n out_in_ratio = float(out)/float(inc)\n new_seq, orig_seq_length, inc_added, out_added = pad_sequence(seq)\n \n # account for added beginning sequence padding in overall total\n inc += inc_added\n out += out_added\n\n # account for added beginning sequence padding in first 
50 or so cells\n inc_50 += inc_added\n out_50 += out_added\n\n out_padding = 0\n in_padding = 0\n \n # flip a coin\n coin = random.randint(1, 9)\n \n # if the circuit has more incoming cells than outgoing cells \n # (typical of Client-RP)\n if classify != \"noise\" and out_in_ratio < 0.98:\n \n # pad the outgoing cells to bring the ratios closer\n if coin <= 4:\n out_padding = int(out / out_in_ratio * 0.85)\n else:\n out_padding = int(out / out_in_ratio * 1.05)\n \n # if there are more outgoing than incoming cells \n # (typical of HS-RP)\n elif classify != \"noise\" and out_in_ratio > 1.02:\n \n # pad the incoming cells to bring the ratios closer\n if coin <= 4:\n in_padding = int(inc * out_in_ratio * 0.9)\n else:\n in_padding = int(inc * out_in_ratio * 1.05)\n\n # add the appropriate padding to the overall totals\n inc += in_padding\n out += out_padding\n\n # we have to account for how padding would affect the first 50 or so cells\n first_cells = inc_50 + out_50\n first_ratio = float(inc_50)/first_cells\n if first_cells > 50:\n first_cells = 50\n \n # the first 50 cells should have a similar ratio to the padding\n new_inc_percent = float(inc) / (inc + out)\n \n # add a bit of randomness to the first 50 if they are not noise\n first_random = random.randint(1, 201) / 1000.0\n flip = random.randint(1, 11)\n if flip % 2 == 0:\n if new_inc_percent + new_inc_percent * first_random < 1:\n new_inc_percent += new_inc_percent * first_random\n else:\n if new_inc_percent - new_inc_percent * first_random < 1:\n new_inc_percent -= new_inc_percent * first_random\n\n general = False\n # don't mess with the ratio if we didn't pad the whole thing\n if classify == \"noise\":\n general = True\n new_inc_percent = first_ratio\n\n # the first 50 cells should follow the padded ratio\n inc_50 = int(new_inc_percent * first_cells)\n out_50 = first_cells - inc_50\n\n # the padded instance for the new file\n padded_instance = new_seq + \",\" + str(inc) + \",\" + str(out) + \",\" \\\n + str(lifetime) + \",\" + classify + \",\" + str(inc_50) + \",\" + str(out_50)\n\n num_cells_with_padding = inc + out\n\n # return the padded instance, the initial number of cells for the circuit,\n # and the number of cells after padding, because we need to know\n # how much overhead the padding adds\n return padded_instance, initial_num_cells, num_cells_with_padding, general", "def calculateRelations(self, nPlayer, nTarget):\n\t\t\n\t\tif (nPlayer != nTarget and gc.getTeam(gc.getPlayer(nPlayer).getTeam()).isHasMet(gc.getPlayer(nTarget).getTeam())):\n\t\t\tnAttitude = 0\n\t\t\tszAttitude = CyGameTextMgr().getAttitudeString(nPlayer, nTarget)\n\t\t\tltPlusAndMinuses = re.findall (\"[-+][0-9]+\\s?: \", szAttitude)\n\t\t\tfor i in range (len (ltPlusAndMinuses)):\n\t\t\t\tnAttitude += int (ltPlusAndMinuses[i][:-2])\n\t\t\treturn nAttitude\n\t\telse:\n\t\t\treturn 0", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def __init__(self, owner1: 'ln.LightningNode', owner2: 'ln.LightningNode'):\n self.address = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))\n self.owner1 = owner1\n self.owner2 = owner2\n self.total_msat = 0 # will be changed as owners deposit funds.", "def rel_position_um(soma, d):\n \n return soma['wParamsNum'][26:29] - d['wParamsNum'][26:29]", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point 
= IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset" ]
[ "0.5927528", "0.57913303", "0.5498828", "0.54520005", "0.5265909", "0.52382195", "0.520508", "0.5108444", "0.50968987", "0.5064629", "0.5039651", "0.5012671", "0.4999943", "0.4968546", "0.4968546", "0.49591216", "0.4952447", "0.49499637", "0.49423927", "0.49011195", "0.4898525", "0.4898525", "0.4898525", "0.4897764", "0.48889238", "0.488882", "0.48787028", "0.48787028", "0.48787028", "0.48787028" ]
0.62144107
0
Calculate the heat recovery
def calc_lost_heat_recovery (self): if not self.cd['heat recovery operational']: self.lost_heat_recovery = [0] else: gen_eff = self.cd["diesel generation efficiency"] self.lost_heat_recovery = \ (self.generation / gen_eff )* .10
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery", "def generateHeatMask( im ):\n featref = computeFeatures(im)\n h,w,nbp = im.shape\n print(\"w:%d,h:%d\"%(w,h))\n heatMap = np.zeros((h,w,1), dtype=np.int8)\n black = [0,0,0]\n white = [255,255,255]\n rMaxDiff = 0\n for j in range(h):\n print(\"j:%d\" % j )\n for i in range(w):\n #~ print(\"i:%d\" % i )\n arDiff = []\n for color in [black,white]:\n #~ print(\"color:%s\" % color)\n imt = np.copy(im)\n \n # quite same time on my tablet !!! \n # (would have think the [] would be far fastest!)\n if 0:\n cv2.circle(imt, (i,j), 1, color )\n else:\n imt[j,i]=color\n if 0:\n cv2.imshow(\"imt\",imt)\n cv2.waitKey(1)\n #~ feat = computeFeatures(imt)\n #~ rDiff = diffFeatures(featref,feat)\n rDiff = mseFloat(im,imt)\n arDiff.append(rDiff)\n #~ print(rDiff)\n rDiff = max(arDiff)\n if rDiff > rMaxDiff:\n rMaxDiff = rDiff\n heatMap[j,i] = rDiff*10\n print(\"rMaxDiff: %5.3f\" % rMaxDiff )\n #~ print(dir(cv2))\n #~ heatMap = cv2.resize(heatMap,(w*2,h*2))\n cv2.namedWindow(\"heat\",cv2.CV_WINDOW_AUTOSIZE|cv2.WINDOW_NORMAL)\n cv2.imshow(\"heat\",heatMap)\n cv2.resizeWindow(\"heat\",600,480)\n cv2.waitKey(0)", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def get_specific_heat() -> float:\n return 1006.0", "def post_heatdiag(self,ds):\n #\n self.drmid=self.rmid*0 # mem allocation\n self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n dt = np.zeros_like(self.time)\n dt[1:] = self.time[1:] - self.time[0:-1]\n dt[0] = dt[1]\n rst=np.nonzero(dt<0) #index when restat happen\n dt[rst]=dt[rst[0]+1]\n self.dt = dt\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=np.transpose(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=np.transpose(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=np.transpose(self.e_number_psi)/dt/ds\n self.gi=np.transpose(self.i_number_psi)/dt/ds\n\n self.qe = np.transpose(self.qe)\n self.qi = np.transpose(self.qi)\n self.ge = np.transpose(self.ge)\n self.gi = np.transpose(self.gi)\n\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = 
(0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def tldiffusion(self, dt):\n\n # Reset erosion, depo, trans and flux_in to 0\n self.erosion[:] = 0.0\n self.depo[:] = 0.0\n self.trans[:] = 0.0\n self.flux_in[:] = 0.0\n\n # Downstream steepest slope at node:\n self.steepest = self.grid.at_node[\"topographic__steepest_slope\"]\n # On each node, node ID of downstream receiver node\n # (on node (i), ID of node that receives flow from node (i)):\n self.receiver = self.grid.at_node[\"flow__receiver_node\"]\n\n dx = self.grid.dx\n cores = self.grid.core_nodes\n\n # Calculate influx rate on node i = outflux of nodes\n # whose receiver is i\n for i in self.grid.core_nodes:\n self.flux_in[self.receiver[i]] += self.flux_out[i]\n\n # Calculate transport coefficient\n # When S ~ Scrit, d_coeff is set to \"infinity\", for stability and\n # so that there is no deposition\n if self.steepest[i] >= self.slope_crit:\n self.d_coeff[i] = 1000000000.0\n else:\n self.d_coeff[i] = 1 / (\n 1 - (np.power(((self.steepest[i]) / self.slope_crit), 2))\n )\n\n # Calculate deposition rate on node\n self.depo[cores] = self.flux_in[cores] / self.d_coeff[cores]\n\n # Calculate erosion rate on node (positive value)\n # If S > Scrit, erosion is simply set for the slope to return to Scrit\n # Otherwise, erosion is slope times erodibility coefficent\n for i in self.grid.core_nodes:\n if self.steepest[i] > self.slope_crit:\n self.erosion[i] = dx * (self.steepest[i] - self.slope_crit) / (100 * dt)\n else:\n self.erosion[i] = self.k * self.steepest[i]\n\n # Update elevation\n self.elev[i] += (-self.erosion[i] + self.depo[i]) * dt\n\n # Calculate transfer rate over node\n self.trans[cores] = self.flux_in[cores] - self.depo[cores]\n\n # Calculate outflux rate\n self.flux_out[:] = self.erosion + self.trans", "def conductive_heat_flux(discr, eos, cv, grad_t):\n transport = eos.transport_model()\n return -transport.thermal_conductivity(eos, cv)*grad_t", "def specific_heat(Ekinv,n0,N,t0):\n Cv = np.zeros(10)\n for i in range(10):\n avg_K_squared = (1 / len(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]) * np.sum(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]))**2\n FluctK = 1 / len(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]) * np.sum(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]**2) - avg_K_squared\n Cv[i] = -1 / (FluctK/ avg_K_squared * 3 * N / 2 - 1) * 3 * N / 2\n return Cv", "def apply_heat_recovery(\n enduse,\n heat_recovered,\n service,\n service_techs,\n curr_yr\n ):\n try:\n # Fraction of heat recovered in current year\n heat_recovered_p_cy = heat_recovered[enduse][curr_yr]\n\n if heat_recovered_p_cy == 0:\n return service, service_techs\n else:\n # Apply to technologies each stored in dictionary\n service_reduced_techs = {}\n for tech, service_tech in service_techs.items():\n 
service_reduced_techs[tech] = service_tech * (1.0 - heat_recovered_p_cy)\n\n # Apply to array\n service_reduced = service * (1.0 - heat_recovered_p_cy)\n\n return service_reduced, service_reduced_techs\n\n except KeyError:\n\n # no recycling defined\n return service, service_techs", "def conduct_heat(self, delta_time, external_power):\n\t\tself.temperature_container = self.temperature_container+self.area*external_power*delta_time/(self.heat_capacity_container*self.mass_container)#https://en.wikipedia.org/wiki/Heat_capacity\n\t\t\n\t\tinternal_power = 0.591*(self.temperature_container-self.temperature)/0.01#No idea of this is right. Mainly the devides by its length bit. https://en.wikipedia.org/wiki/Thermal_conduction#Fourier's_law\n\t\t\n\t\tif (self.heat_capacity*self.mass())!=0:\n\t\t\tself.temperature = self.temperature+internal_power*delta_time/(self.heat_capacity*self.mass())\n\t\t\t#self.temperature_container=self.temperature_container-internal_power*delta_time/(self.heat_capacity_container*self.mass_container)#Als je dit toevoegd lijkt de simulatie niet goed meer te werken dus nog even uitzoeken heo dat zit.", "def load_heatdiag(self, **kwargs):\n read_rz = kwargs.get('read_rz',True) #read heat load in RZ\n\n self.hl=[]\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",0,read_rz) ) #actual reading routine\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",1,read_rz) )#actual reading routine\n\n for i in [0,1] :\n try:\n self.hl[i].psin=self.hl[i].psi[0,:]/self.psix #Normalize 0 - 1(Separatrix)\n except:\n print(\"psix is not defined - call load_unitsm() to get psix to get psin\")\n\n #read bfieldm data if available\n self.load_bfieldm()\n\n #dt=self.unit_dic['sml_dt']*self.unit_dic['diag_1d_period']\n wedge_n=self.unit_dic['sml_wedge_n']\n for i in [0,1]:\n dpsin=self.hl[i].psin[1]-self.hl[i].psin[0] #equal dist\n #ds = dR* 2 * pi * R / wedge_n\n ds=dpsin/self.bfm.dpndrs* 2 * 3.141592 * self.bfm.r0 /wedge_n #R0 at axis is used. should I use Rs?\n self.hl[i].rmid=np.interp(self.hl[i].psin,self.bfm.psino,self.bfm.rmido)\n self.hl[i].post_heatdiag(ds)\n self.hl[i].total_heat(wedge_n)", "def test_calculate_specific_heat(self):\n expected = np.array([1089.5, 1174.0, 1258.5], dtype=np.float32)\n result = WetBulbTemperature()._calculate_specific_heat(self.mixing_ratio)\n self.assertArrayAlmostEqual(result, expected, decimal=2)", "def bruteforce(self):\n import time\n t1 = time.time()\n for i in range(self.td.shape[0]):\n #Get the latitude at the start of the row, this is used for the entire row\n\n if i % config.LATITUDE_STEP == 0:\n startlat = i + config.LATITUDE_STEP #move to the center of the step\n startlat += self.start #Offset for parallel segmentation\n\n # This is the latitude at the center of the tile defined by\n # the image width, and the latitude_step\n x = int(self.td.shape[1] / 2)\n y = int((startlat + config.LATITUDE_STEP) / 2)\n latitude, _ = self.temperature.pixel_to_latlon(x,y)\n\n lat_f = PchipInterpolator(self.latitudenodes, self.lookup, extrapolate=False, axis=0)\n #The reshape corresponds to the dimensions of the OLAP cube\n # 5 elevations, 5 slope azimuths, 3 slopes, 3 opacities, 3 albedos, and finally 20 TI\n data = lat_f(latitude)\n compressedlookup = data.reshape(6,5,3,3,3,20)\n # Compute the PChip interpolation function for elevation\n elevation_interp_f = PchipInterpolator(np.array([-5.0, -2.0, -1.0, 1.0, 6.0, 8.0]), compressedlookup, extrapolate=False, axis=0)\n \n for j in range(self.td.shape[1]):\n # Each interpolation is composed in 2 parts.\n # 1. 
The interpolation function is computed.\n # 2. The interpolation function is applied.\n #print(self.reference[i,j], self.r_ndv)\n # If either the reference or the input THEMIS have no data\n if (self.td[i,j] == self.ndv) or (self.reference[i,j] == self.r_ndv):\n #The pixel is no data in the input, propagate to the output\n self.resultdata[i,j] = self.ndv\n continue\n\n #Interpolate elevation\n try:\n new_elevation = elevation_interp_f(self.ed[i,j])\n except:\n # The elevation is bad.\n self.resultdata[i,j] = self.ndv\n self.log[i,j] = self.error_codes['elevation_out_of_bounds']\n continue\n #Interpolate Slope Azimuth\n slopeaz_f = self.compute_interpolation_function(sorted(self.slopeaz_lookup.keys()),\n new_elevation,\n config.SLOPEAZ_INTERPOLATION)\n new_slopeaz = slopeaz_f(self.sz[i,j])\n #Interpolate Slope\n slope_f = self.compute_interpolation_function(sorted(self.slope_lookup.keys()),\n new_slopeaz,\n config.SLOPE_INTERPOLATION)\n capped_slope = self.sd[i,j]\n if capped_slope > 60.0:\n capped_slope = 60.0\n new_slope = slope_f(capped_slope)\n # I am having problems here with pulling TAU properly - check montabone!\n #Interpolate Tau\n tau_f = PchipInterpolator(sorted(self.tau_lookup.keys()),\n new_slope,\n extrapolate=False,\n axis=0)\n new_tau = tau_f(self.od[i,j])\n #Interpolate Albedo\n albedo_f = self.compute_interpolation_function(sorted(self.albedo_lookup.keys()),\n new_tau,\n config.ALBEDO_INTERPOLATION)\n new_albedo = albedo_f(self.ad[i,j])\n #Interpolate Inertia\n self.resultdata[i,j] = self.extract_monotonic(self.td[i,j],\n new_albedo)", "def heat_balance(index):\n t = index[0]\n return (\n heat_hru_out[t]\n + pulp.lpSum([component_output[i, t] for i in index_heat_out])\n - pulp.lpSum([component_input[i, t] for i in index_heat_in])\n + heat_unserve[t]\n - heat_dump[t]\n == forecast[\"heat_load\"][t]\n )", "def compute_energy(img):\r\n # urmati urmatorii pasi:\r\n # 1. transformati imagine in grayscale\r\n # 2. folositi filtru sobel pentru a calcula gradientul in directia X si Y\r\n # 3. 
calculati magnitudinea imaginii\r\n\r\n img_gray_scale = cv.cvtColor(img, cv.COLOR_BGR2GRAY);\r\n\r\n #de cautat totusi si codul pt SOBEL pe net\r\n grad_x = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 1, dy = 0, borderType = cv.BORDER_CONSTANT)\r\n grad_y = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 0, dy = 1, borderType = cv.BORDER_CONSTANT)\r\n\r\n#E repr gradientii aka cat se sch un pixel de la unul la altul\r\n E = abs(grad_x) + abs(grad_y)\r\n # print(grad_y)\r\n # print(grad_x)\r\n\r\n cv.imwrite(\"poza.jpg\", E)\r\n return E", "def calc_loss_flux(self, shotANDplunge=\"167192.1\"):\n\n wb = xl.load_workbook(\"recLPdata.xlsx\", data_only=True)\n dataSheet = wb.get_sheet_by_name(\"Sheet1\")\n\n # Get the correct cells.\n if shotANDplunge == \"167192.1\":\n timeLow = \"A3\"\n timeHigh = \"A64\"\n densLow = \"C3\"\n densHigh = \"C64\"\n tempLow = \"D3\"\n tempHigh = \"D64\"\n rMinRsepLow = \"G3\"\n rMinRsepHigh = \"G64\"\n elif shotANDplunge == \"167192.2\":\n timeLow = \"I3\"\n timeHigh = \"I55\"\n densLow = \"K3\"\n densHigh = \"K55\"\n tempLow = \"L3\"\n tempHigh = \"L55\"\n rMinRsepLow = \"O3\"\n rMinRsepHigh = \"O55\"\n elif shotANDplunge == \"167193.1\":\n timeLow = \"Q3\"\n timeHigh = \"Q61\"\n densLow = \"S3\"\n densHigh = \"S61\"\n tempLow = \"T3\"\n tempHigh = \"T61\"\n rMinRsepLow = \"W3\"\n rMinRsepHigh = \"W61\"\n elif shotANDplunge == \"167193.2\":\n timeLow = \"Y3\"\n timeHigh = \"Y48\"\n densLow = \"AA3\"\n densHigh = \"AA48\"\n tempLow = \"AB3\"\n tempHigh = \"AB48\"\n rMinRsepLow = \"AE3\"\n rMinRsepHigh = \"AE48\"\n elif shotANDplunge == \"167194.1\":\n timeLow = \"AG3\"\n timeHigh = \"AG71\"\n densLow = \"AI3\"\n densHigh = \"AI71\"\n tempLow = \"AJ3\"\n tempHigh = \"AJ71\"\n rMinRsepLow = \"AM3\"\n rMinRsepHigh = \"AM71\"\n elif shotANDplunge == \"167194.2\":\n timeLow = \"AO3\"\n timeHigh = \"AO67\"\n densLow = \"AQ3\"\n densHigh = \"AQ67\"\n tempLow = \"AR3\"\n tempHigh = \"AR67\"\n rMinRsepLow = \"AU3\"\n rMinRsepHigh = \"AU67\"\n elif shotANDplunge == \"167195.1\":\n timeLow = \"AW3\"\n timeHigh = \"AW60\"\n densLow = \"AY3\"\n densHigh = \"AY60\"\n tempLow = \"AZ3\"\n tempHigh = \"AZ60\"\n rMinRsepLow = \"BC3\"\n rMinRsepHigh = \"BC60\"\n elif shotANDplunge == \"167195.2\":\n timeLow = \"BE3\"\n timeHigh = \"BE59\"\n densLow = \"BG3\"\n densHigh = \"BG59\"\n tempLow = \"BH3\"\n tempHigh = \"BH59\"\n rMinRsepLow = \"BK3\"\n rMinRsepHigh = \"BK59\"\n else:\n return print(\"Incorrect shot/plunge.\")\n\n times = self.returnArray(dataSheet, timeLow, timeHigh)\n dens = self.returnArray(dataSheet, densLow, densHigh)\n temps = self.returnArray(dataSheet, tempLow, tempHigh)\n rmins = self.returnArray(dataSheet, rMinRsepLow, rMinRsepHigh)\n\n # Go from 10^18 m^-3 to just m^-3.\n for index in range(0, len(dens)):\n if dens[index] is None:\n continue\n else:\n dens[index] = dens[index] * 10**18\n\n # Plasma sound speed assuming Te = Ti.\n sound_speeds = [(temp*2 / massD)**0.5 for temp in temps]\n\n self.shot_and_plunge = shotANDplunge\n self.times = times\n self.dens = dens\n self.temps = temps\n self.rmins = rmins\n self.sound_speeds = sound_speeds\n\n # The flux of W off the probe due to sputtering. sputt_flux = yield * flux of dueterium.\n def sputt_flux(ne, Ti, Te):\n # Sputtering energy threshold of tungsten oxide in eV. 
Note pure W is 160 eV.\n eThresh = 65\n soundSpeed = ((float(Te) + float(Ti)) / massD)**0.5\n\n # Use lambda function for use in integrate,\n func = lambda E: 0.528 * alpha * Z_D * (massD / (u0*(massD + massW))) * 0.059 * (E+3*Ti) ** (1.0/3.0) * soundSpeed * ne * 2 * (E/3.1415)**0.5 * (1/float(Ti))**(1.5) * math.exp(-E/Ti)\n ans, err = integrate.quad(func, eThresh, np.inf)\n\n #print(\"Sputtered Flux: \" + str(ans))\n #print(\"Sputtered Flux Error: \" + str(err/ans * 100) + \"%\")\n\n return ans\n\n\n for probe in [\"A\", \"B\", \"C\"]:\n # Use corresponding size for desired probe.\n if probe==\"A\":\n size = aSize\n elif probe==\"B\":\n size = bSize\n elif probe==\"C\":\n size = cSize\n else:\n print(\"Incorrect probe entry. Should be either A, B, or C.\")\n\n print(\"Calculating loss flux for \" + probe + \" probes...\")\n\n flux_loss = []\n for index in range(0, len(self.temps)):\n Te = self.temps[index]\n ne = self.dens[index]\n cs = self.sound_speeds[index]\n\n # Approx. speed of W entering flux tube.\n v0 = 0.5 * cs\n\n # Get the ionization rate coefficient for a specific temperature.\n ad = atomic.element('tungsten')\n temperatureRange = np.logspace(0,4,100)\n S = ad.coeffs['ionisation']\n f = interpolate.interp1d(temperatureRange, S(0, temperatureRange, ne))\n coeff = f(Te)\n\n # Calculate lamda_ionization.\n lambda_iz = v0 * (ne * coeff)**(-1)\n\n # Fraction ionized in the flux tube (i.e. it will return to the probe)\n frac = 1 - math.exp(-size / lambda_iz)\n #print(\"Fraction Ionized: \" + str(frac))\n\n # Thus the fraction lost is 1-frac of the sputtered flux.\n Ti = Te\n fracFluxLost = (1 - frac) * sputt_flux(ne=ne, Ti=Ti, Te=Te)\n #print(\"Flux Lost: \" + str(fracFluxLost))\n\n flux_loss.append(fracFluxLost)\n\n self.loss_dict[probe] = {\"rminrsep\":self.rmins, \"flux\":flux_loss}", "def diffusive_heat_flux(discr, eos, cv, j):\n if isinstance(eos, MixtureEOS):\n h_alpha = eos.species_enthalpies(cv)\n return sum(h_alpha.reshape(-1, 1) * j)\n return 0", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n 
#hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if 
r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def _calc(self):\r\n u = self._fadefunc(self.xf)\r\n v = self._fadefunc(self.yf)\r\n w = self._fadefunc(self.zf)\r\n\r\n # populate the hashes dict\r\n self._hash()\r\n \r\n # once the hash dict is populated, start calculating the dot product between \r\n # the gradient vector and the distance vectors, which is done in the _grad method.\r\n # finally linearly interpolate the values to get the avg value\r\n # first interpolate in the x-dir, then in y-dir\r\n x1: float = self._lerp(self._grad(self.hashes[\"aaa\"], self.xf, self.yf, self.zf),\r\n self._grad(self.hashes[\"baa\"], self.xf - 1, self.yf, self.zf), u)\r\n\r\n x2: float = self._lerp(self._grad(self.hashes[\"aba\"], self.xf, self.yf - 1, self.zf),\r\n self._grad(self.hashes[\"bba\"], self.xf - 1, self.yf - 1, self.zf), u)\r\n\r\n # the first y-dir lerp\r\n y1: float = self._lerp(x1, x2, v)\r\n\r\n x1: float = self._lerp(self._grad(self.hashes[\"aab\"], self.xf, self.yf, self.zf - 1),\r\n self._grad(self.hashes[\"bab\"], self.xf - 1, self.yf, self.zf - 1), u)\r\n\r\n x2: float = self._lerp(self._grad(self.hashes[\"abb\"], self.xf, self.yf - 1, self.zf - 1),\r\n self._grad(self.hashes[\"bbb\"], self.xf-1, self.yf-1, self.zf-1), u)\r\n\r\n # the second y-dir lerp\r\n y2: float = self._lerp(x1, x2, v)\r\n\r\n # the final noise value, which will be in the range [0, 
1]\r\n self.value = (self._lerp(y1, y2, w) + 1)/2\r\n return self.value", "def energy_map(img):\n img_new = img.astype(float) #converting image to float\n total_energy = 0.0 # To store the sum of energy for all channels\n r,c,d = img.shape \n for i in range(d):\n dy = np.zeros([r, c], dtype=float) \n dx = np.zeros([r, c], dtype=float)\n if r > 1:\n dy = np.gradient(img_new[:,:,i], axis=0) #gradient along rows\n if c > 1:\n dx = np.gradient(img_new[:,:,i], axis=1) #gradient along columns\n total_energy += np.absolute(dy) + np.absolute(dx) \n return total_energy #Total energy map for entire image", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def recovery(self):\n\n def exponential(time, tau):\n\n time = list(map(lambda x: float(x), time))\n exponent = np.exp(-np.divide(time, tau))\n return (1 - exponent)\n \n \n initial_guess = [55]\n \n tau = []\n for i in range(self.n_cols):\n current = self.screened_data.iloc[:,i]\n popt = curve_fit(exponential, self.xaxis, current, p0 = initial_guess)\n tau.append(popt[0][0])\n\n print('Median: ', np.median(tau))\n print('Min: ', np.min(tau))\n print('Max: ', np.max(tau))\n return 0\n\n plt.plot(self.xaxis, self.averaged_data, label = 'Average of all models')\n plt.plot(exponential(self.xaxis, *initial_guess), label = 'Initial Guess')\n for i in range(len(popt)):\n plt.plot(self.xaxis, exponential(self.xaxis, *popt[i]), label = 'Best Fit: time = ' + str(*popt[i]) + ' (ms)')\n plt.xlabel('Time (ms)')\n plt.ylabel('Normalized Current')\n plt.title('Recovery from Inactivation')\n plt.legend()\n plt.savefig('recovery_exponential_fit.png')\n return popt", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def heat_flux_out(T_inf, T_old, hc_air, emmi):\n\n #nz = T_old.shape[0]\n ny = T_old.shape[0]\n nx = T_old.shape[1]\n\n Q_out = np.zeros((ny, nx))\n h_eff = np.zeros((ny, nx))\n T_eff = np.zeros((ny, nx))\n for i in range(nx):\n for j in range(ny):\n T_eff[j, i] = ((T_old[j, i]**3) + (T_inf * T_old[j, i]**2)\n + (T_old[j, i] * T_inf**2) + T_inf**3)\n\n h_eff[j, i] = hc_air + (emmi*STEF_BOL_C*T_eff[j, 
i])\n\n Q_out[j, i] = h_eff[j, i] * (T_old[j, i] - T_inf)\n\n return Q_out", "def linear_heat_transfer(x, t, K_medium, rho_medium, c_medium, T_medium_initial, H_heat_transfer, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n\n h = H_heat_transfer/K_medium\n erfc_factor_1 = erfc(x/(2*np.sqrt(k*t)))\n\n #combine factors in logdomain, since the exp-factors quickly approach\n #infinity while erfc-factor goes to zero\n log_exp_factor_1 = h*x\n log_exp_factor_2 = k*t*h**2\n log_erfc_factor_2 = np.log(erfc(x/(2*np.sqrt(k*t)) + h*np.sqrt(k*t)))\n exp_erfc_factor = np.exp(log_exp_factor_1 + log_exp_factor_2 + log_erfc_factor_2)\n\n return (erfc_factor_1 - exp_erfc_factor)*(T_external_applied - T_medium_initial) + T_medium_initial", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def infectedToRecovered(self):\n\n # initialize a random matrix where around recovery_probability % of the values are True\n recover_prob_arr = np.random.rand(self.space.shape[0],self.space.shape[1]) < self.recovery_probability\n # find the overlap between infected and above array and make those people recovered\n self.space[np.logical_and(self.space == 1, recover_prob_arr)] = 2" ]
[ "0.72527164", "0.6295159", "0.6292347", "0.62802047", "0.6267784", "0.60749567", "0.59754294", "0.59540564", "0.58983105", "0.5896097", "0.589459", "0.5840689", "0.56996167", "0.56794786", "0.5667763", "0.5648994", "0.56376565", "0.56210124", "0.5620937", "0.55695546", "0.55411637", "0.5532295", "0.55132884", "0.54602915", "0.54493475", "0.5431156", "0.5414194", "0.5403336", "0.5380926", "0.5365836" ]
0.75004613
0
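Each row above pairs a natural-language query with a positive Python snippet (document), thirty negative snippets, one score per negative, the positive's own score, and its rank among the negatives. The short sketch below shows one assumed way to hold such a row in Python for downstream use; the RetrievalRecord class name, the hardest_negative helper, and the truncated sample values are illustrative assumptions rather than part of the dataset, and the scores, which appear as strings in the dump, are converted to floats here.

# Minimal sketch (assumed names): one row of this dump as a Python object.
from dataclasses import dataclass
from typing import Any, Dict, List


@dataclass
class RetrievalRecord:
    query: str                    # natural-language description of the target function
    document: str                 # positive Python snippet that answers the query
    metadata: Dict[str, Any]      # e.g. {"objective": {"triplet": [["query", "document", "negatives"]]}}
    negatives: List[str]          # non-matching snippets (30 per row in this dump)
    negative_scores: List[float]  # one score per negative, same order (stored as strings in the dump)
    document_score: float         # score of the positive document
    document_rank: int            # rank of the positive among the negatives (0 = ranked first)

    def hardest_negative(self) -> str:
        # The negative that scored closest to the query.
        best = max(range(len(self.negatives)), key=lambda i: self.negative_scores[i])
        return self.negatives[best]


# Usage with truncated stand-in values taken from the heat-recovery row above.
row = RetrievalRecord(
    query="Calculate the heat recovery",
    document="def calc_lost_heat_recovery(self): ...",
    metadata={"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
    negatives=["def calc_loss_heat_recovery(self): ...", "def manipulate_heat_data(self): ..."],
    negative_scores=[float("0.72527164"), float("0.6295159")],
    document_score=0.75004613,
    document_rank=0,
)
print(row.hardest_negative())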
Calculate the capital costs.
def calc_capital_costs (self): road_needed = 'road needed' if self.cd['on road system']: road_needed = 'road not needed' dist = self.comp_specs['distance to community'] self.capital_costs = self.comp_specs['est. intertie cost per mile']\ [road_needed] * dist #~ print self.capital_costs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs", "def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not self.cd['switchgear suitable for renewables']:\n powerhouse_control_cost = self.cd['switchgear cost']\n\n #~ road_needed = self.comp_specs['road needed for transmission line']\n\n\n if str(self.comp_specs['transmission capital cost'])\\\n != 'UNKNOWN':\n transmission_line_cost = \\\n int(self.comp_specs['transmission capital cost'])\n else:\n if str(self.comp_specs['distance to resource']) \\\n != 'UNKNOWN':\n distance = \\\n float(self.comp_specs\\\n ['distance to resource'])\n transmission_line_cost = \\\n distance*self.comp_specs['est. transmission line cost']\n\n secondary_load_cost = 0\n if self.comp_specs['secondary load']:\n secondary_load_cost = self.comp_specs['secondary load cost']\n\n if str(self.comp_specs['generation capital cost']) \\\n != 'UNKNOWN':\n wind_cost = \\\n int(self.comp_specs['generation capital cost'])\n self.cost_per_kw = np.nan\n else:\n for i in range(len(self.comp_specs['estimated costs'])):\n if int(self.comp_specs['estimated costs'].iloc[i].name) < \\\n self.load_offset_proposed:\n if i == len(self.comp_specs['estimated costs']) - 1:\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n continue\n\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n\n wind_cost = self.load_offset_proposed * cost\n self.cost_per_kw = cost\n\n #~ print powerhouse_control_cost\n #~ print transmission_line_cost\n #~ print secondary_load_cost\n #~ print wind_cost\n self.capital_costs = powerhouse_control_cost + transmission_line_cost +\\\n secondary_load_cost + wind_cost\n\n #~ print 'self.capital_costs',self.capital_costs", "def cost(self) -> float:", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def get_expected_cost(self):", "def set_costs(self) -> None:\n self[\"glider cost\"] = (\n self[\"glider base mass\"] * self[\"glider cost slope\"]\n + self[\"glider cost intercept\"]\n )\n self[\"lightweighting cost\"] = (\n self[\"glider base mass\"]\n * self[\"lightweighting\"]\n * self[\"glider lightweighting cost per kg\"]\n )\n self[\"electric powertrain cost\"] = (\n self[\"electric powertrain cost per kW\"] * self[\"electric power\"]\n )\n self[\"combustion powertrain cost\"] = (\n self[\"combustion power\"] * self[\"combustion powertrain cost per kW\"]\n )\n self[\"fuel cell cost\"] = self[\"fuel cell power\"] * self[\"fuel cell cost per kW\"]\n self[\"power battery cost\"] = (\n self[\"battery power\"] * self[\"power battery cost per kW\"]\n )\n self[\"energy battery cost\"] = (\n self[\"energy battery cost per kWh\"] * self[\"electric energy stored\"]\n )\n self[\"fuel tank cost\"] = self[\"fuel tank cost per kg\"] * self[\"fuel mass\"]\n # Per km\n self[\"energy cost\"] = self[\"energy cost per kWh\"] * self[\"TtW energy\"] / 3600\n\n # For battery, need to divide cost of electricity\n # at battery by efficiency of charging\n # to get costs at the \"wall socket\".\n\n _ = lambda x: 
np.where(x == 0, 1, x)\n self[\"energy cost\"] /= _(self[\"battery charge efficiency\"])\n\n self[\"component replacement cost\"] = (\n self[\"energy battery cost\"] * self[\"battery lifetime replacements\"]\n + self[\"fuel cell cost\"] * self[\"fuel cell lifetime replacements\"]\n )\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n to_markup = yaml.safe_load(stream)[\"markup\"]\n\n self[to_markup] *= self[\"markup factor\"]\n\n # calculate costs per km:\n self[\"lifetime\"] = self[\"lifetime kilometers\"] / self[\"kilometers per year\"]\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n purchase_cost_params = yaml.safe_load(stream)[\"purchase\"]\n\n self[\"purchase cost\"] = self[purchase_cost_params].sum(axis=2)\n # per km\n amortisation_factor = self[\"interest rate\"] + (\n self[\"interest rate\"]\n / (\n (np.array(1) + self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n - np.array(1)\n )\n )\n self[\"amortised purchase cost\"] = (\n self[\"purchase cost\"] * amortisation_factor / self[\"kilometers per year\"]\n )\n\n # per km\n self[\"maintenance cost\"] = (\n self[\"maintenance cost per glider cost\"]\n * self[\"glider cost\"]\n / self[\"kilometers per year\"]\n )\n\n # simple assumption that component replacement\n # occurs at half of life.\n self[\"amortised component replacement cost\"] = (\n (\n self[\"component replacement cost\"]\n * (\n (np.array(1) - self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n / 2\n )\n )\n * amortisation_factor\n / self[\"kilometers per year\"]\n )\n\n self[\"total cost per km\"] = (\n self[\"energy cost\"]\n + self[\"amortised purchase cost\"]\n + self[\"maintenance cost\"]\n + self[\"amortised component replacement cost\"]\n )", "def calculate_cost(self):\n costs = {}\n if np.abs(self.agent.get_position()[1]) > self.y_lim:\n costs['cost_outside_bounds'] = 1.\n if self.agent.velocity_violation:\n costs['cost_velocity_violation'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n return costs", "def cost_a(self):\n return self._cost_a", "def calc_maintenance_cost(self):\n\n self.maintenance_cost = self.capital_costs * .01", "def getCosts(self):\n return self.costs", "def cost_b(self):\n return self._cost_b", "def calculate_cost(self, **kwargs):\n costs = {}\n if np.abs(self.agent.get_position()[0]) > self.x_lim:\n costs['cost_outside_bounds'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n\n return costs", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions\n )\n # sum all costs in one total cost\n cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))\n\n return cs", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def set_costs_table(self) -> None:\n self.costs[\"B\"] = 2\n self.costs[\"A\"] = 6\n self.costs[\"fin\"] = float(\"inf\")", "def _load_costs(self):\n F_BM = self.F_BM\n F_D = self.F_D\n F_P = self.F_P\n F_M = self.F_M\n baseline_purchase_costs = self.baseline_purchase_costs\n purchase_costs = self.purchase_costs\n installed_costs = 
self.installed_costs\n \n # Load main costs\n for i in purchase_costs:\n if i not in baseline_purchase_costs:\n baseline_purchase_costs[i] = purchase_costs[i]\n for name, Cpb in baseline_purchase_costs.items(): \n if name in installed_costs and name in purchase_costs:\n continue # Assume costs already added elsewhere using another method\n F = F_D.get(name, 1.) * F_P.get(name, 1.) * F_M.get(name, 1.)\n try:\n installed_costs[name] = Cpb * (F_BM[name] + F - 1.)\n except KeyError:\n F_BM[name] = 1.\n installed_costs[name] = purchase_costs[name] = Cpb * F\n else:\n purchase_costs[name] = Cpb * F", "def compute_cost(AL, Y):\n pass", "def update_capital_stats(self):\n short_capital = 0\n long_capital = 0\n\n for pos in (self.active_long_positions + self.active_short_positions):\n\n if pos.order_type == Consts.LONG:\n long_capital += pos.get_current_liquid_capital()\n else:\n short_capital += pos.get_current_liquid_capital()\n\n self.short_capital = short_capital\n self.long_capital = long_capital", "def _set_costs(self):\n plant_size_kw = (self.sam_sys_inputs[\"resource_potential\"]\n / self._RESOURCE_POTENTIAL_MULT) * 1000\n\n cc_per_kw = self.sam_sys_inputs.pop(\"capital_cost_per_kw\", None)\n if cc_per_kw is not None:\n capital_cost = cc_per_kw * plant_size_kw\n logger.debug(\"Setting the capital_cost to ${:,.2f}\"\n .format(capital_cost))\n self.sam_sys_inputs[\"capital_cost\"] = capital_cost\n\n dc_per_well = self.sam_sys_inputs.pop(\"drill_cost_per_well\", None)\n num_wells = self.sam_sys_inputs.pop(\"prod_and_inj_wells_to_drill\",\n None)\n if dc_per_well is not None:\n if num_wells is None:\n msg = ('Could not determine number of wells to be drilled. '\n 'No drilling costs added!')\n logger.warning(msg)\n warn(msg)\n else:\n capital_cost = self.sam_sys_inputs[\"capital_cost\"]\n drill_cost = dc_per_well * num_wells\n logger.debug(\"Setting the drilling cost to ${:,.2f} \"\n \"({:.2f} wells at ${:,.2f} per well)\"\n .format(drill_cost, num_wells, dc_per_well))\n self.sam_sys_inputs[\"capital_cost\"] = capital_cost + drill_cost\n\n foc_per_kw = self.sam_sys_inputs.pop(\"fixed_operating_cost_per_kw\",\n None)\n if foc_per_kw is not None:\n fixed_operating_cost = foc_per_kw * plant_size_kw\n logger.debug(\"Setting the fixed_operating_cost to ${:,.2f}\"\n .format(capital_cost))\n self.sam_sys_inputs[\"fixed_operating_cost\"] = fixed_operating_cost", "def compute_cost(AL, Y):\n pass", "def calculate_profit(self):", "def calculate_atb_costs(self, year, scenario='Moderate'):\n if scenario == 'Advanced' or 'Moderate' or 'Conservative':\n return self._lookup_costs(year, scenario)\n else:\n raise ValueError(\"scenario type {} not recognized\".format(scenario))", "def calculate_total_cost(state):\n pass", "def _compute_calculate_cost(self):\n for order in self:\n amount_calculate_cost = 0.0\n for line in order.order_line:\n amount_calculate_cost += (line.product_id.standard_price * line.product_uom_qty)\n order.update({\n 'amount_calculate_cost': amount_calculate_cost\n })", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n z = self.agent.get_position()[2]\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions,\n # Drone should not leave valid operation space...\n cost_out_of_range=(1. 
if z > 2 else 0.)\n )\n # sum all costs in one total cost\n cs['cost'] = min(1, sum(v for k, v in cs.items() if k.startswith('cost_')))\n return cs", "def _trading_cost(self, current_weights, prev_weights):\n delta_weight = current_weights - prev_weights\n delta_weight = delta_weight[:-1] # No costs associated with risk-free asset\n trading_cost = self.kappa1 * cp.abs(delta_weight) + self.kappa2 * cp.square(delta_weight) # Vector of trading costs per asset\n\n return cp.sum(trading_cost)", "def create_costs():\n infinity = float(\"inf\")\n costs = {}\n costs['biysk'] = 0\n costs['barnaul'] = infinity\n costs['novosibirsk'] = infinity\n costs['belokurikha'] = infinity\n costs['tomsk'] = infinity\n costs['krasnoyarsk'] = infinity\n costs['omsk'] = infinity\n return costs", "def cost_b_v(self):\n return self._cost_b_v", "def calculate_total_cost(state):\r\n return state.cost()" ]
[ "0.7806206", "0.74040896", "0.6545671", "0.64966065", "0.64836544", "0.6464849", "0.63642645", "0.6326284", "0.63027006", "0.62339413", "0.62284034", "0.6213556", "0.6188633", "0.61863375", "0.6160501", "0.61119217", "0.60997415", "0.60323167", "0.599988", "0.5985151", "0.59568524", "0.5935343", "0.5934236", "0.5933673", "0.58778775", "0.582077", "0.58155733", "0.5785474", "0.575747", "0.5714629" ]
0.7694785
1
Calculate annual electric savings created by the project. Attributes
def calc_annual_electric_savings (self): costs = self.comp_specs['diesel generator o&m'] for kW in costs.keys(): try: if self.average_load < int(kW): maintenance = self.comp_specs['diesel generator o&m'][kW] break except ValueError: maintenance = self.comp_specs['diesel generator o&m'][kW] self.baseline_generation_cost = maintenance + \ (self.pre_intertie_generation_fuel_used * self.diesel_prices) maintenance = self.capital_costs * \ (self.comp_specs['percent o&m'] / 100.0) self.proposed_generation_cost = maintenance + \ self.intertie_offset_generation_fuel_used * \ self.intertie_diesel_prices self.annual_electric_savings = self.baseline_generation_cost -\ self.proposed_generation_cost #~ print len(self.annual_electric_savings) #~ print 'self.annual_electric_savings',self.annual_electric_savings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_annual_heating_savings (self):\n price = self.diesel_prices + self.cd['heating fuel premium']\n maintenance = self.comp_specs['heat recovery o&m']\n self.annual_heating_savings = -1 * \\\n (maintenance + (self.lost_heat_recovery * price))", "def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def annualized_perf(self):\n mean_return = round(self.data.log_returns.mean() * 252, 4)\n risk = round(self.data.log_returns.std() * np.sqrt(252), 4)\n print(\"Return: {} | Risk: {}\".format(mean_return, risk))", "def find_eta_projection(self):\r\n \r\n # Get temporal range in terms of years\r\n timedelta = self.year_E_fore_gov[self.elms_E_fore_gov] - self.year_E_fore_gov[self.elms_E_fore_gov][0]\r\n # Number of years over time\r\n num_years = len(timedelta)\r\n \r\n self.t_eta_fit = np.zeros(num_years)\r\n \r\n for yr in range(0,num_years):\r\n \r\n self.t_eta_fit[yr] = timedelta[yr].days/365.25\r\n \r\n \r\n popt, _ = curve_fit(model_expected_eta,self.t_eta_fit,self.eta_gdp_fore[self.elms_E_fore_gov],p0=(0.7,0.1,0.01))\r\n \r\n self.eta_0 = popt[0]\r\n self.eta_b = popt[1]\r\n self.xi = popt[2]\r\n self.eta = model_expected_eta(self.t,self.eta_0,self.eta_b,self.xi)\r\n \r\n self.E_noncovid = model_emissions(self.eta,self.Y_noncovid)\r\n \r\n return", "def __init__(self, total_cost, ann_rate, ann_salary, portion_saved):\r\n\t\tself.total_cost = total_cost\r\n\t\tself.portion_down_payment = total_cost*0.25\r\n\t\tself.ann_rate = ann_rate\r\n\t\tself.monthly_salary = ann_salary/12\r\n\t\tself.portion_saved = portion_saved\r\n\t\tself.current_savings = [0.0,]\r\n\t\tself.months = 0\r\n\t\tself.new_saving = 0", "def annual_energy(self):\n return self['annual_energy']", "def average(self, returns):\r\n return returns.mean() * self.day", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def __cal_aod(self, year, month, day):\n print 'Calculate...'\n logging.info('[calculate]->Calculate...')\n\n t = datetime.datetime(year, month, day)\n\n ddir = self.aodSetting.data_dir\n wdir = self.aodSetting.p_aot_dir\n ascdir = self.aodSetting.ascii_dir\n aotdir = self.aodSetting.aot_dir\n\n stations = self.aodSetting.stations\n\n # Calculate AOD\n print 
'Calculate AOD...'\n logging.info('[calculate]->Calculate AOD...')\n\n for stId in stations.getstIds():\n station = stations.get(stId)\n fn = station.stId\n k7fn = path.join(self.aodSetting.merge_dir, fn, t.strftime('%Y%m'), fn + \"_\" +\n t.strftime(\"%Y%m%d\") + \"_merge.K7\")\n if not os.path.exists(k7fn):\n continue\n print '[{0}]: Ready'.format(fn)\n logging.info('[calculate]->[{0}]: Ready'.format(fn))\n nsu_dir = path.join(ascdir, fn, t.strftime('%Y%m'))\n nsufn = path.join(nsu_dir, fn + \"_\" +\n t.strftime(\"%Y%m%d\") + '.NSU')\n if not os.path.exists(nsufn):\n if not os.path.exists(nsu_dir):\n os.makedirs(nsu_dir)\n rr = spdata.decode(k7fn)\n r = spdata.extract(rr, 'NSU')\n spdata.save(r, nsufn)\n print '[{0}]: Output nsu file'.format(fn)\n logging.info('[calculate]->[{0}]: Output nsu file'.format(fn))\n\n # check if the external program and the parameter files are ready\n validated = True\n exefn = self.aodSetting.p_aot_exe\n if not os.path.exists(exefn):\n print '[{0}]: Not Found Aot program, {1}'.format(fn, exefn)\n logging.warn(\n '[calculate]->[{0}]: Not Found Aot program, {1}'.format(fn, exefn))\n validated = False\n\n inputfn = self.aodSetting.p_aot_input\n if not os.path.exists(inputfn):\n print '[{0}]: Not Found input parameter data, {1}'.format(fn, inputfn)\n logging.warn(\n '[calculate]->[{0}]: Not Found input parameter data, {1}'.format(fn, inputfn))\n validated = False\n\n ozonefn = self.aodSetting.p_aot_ozone\n if not os.path.exists(ozonefn):\n print '[{0}]: Not Found ozone data, {1}'.format(fn, ozonefn)\n logging.warn(\n '[calculate]->[{0}]: Not Found input parameter data, {1}'.format(fn, inputfn))\n validated = False\n\n calfn = path.join(self.aodSetting.p_cal_dir,\n \"calibr\" + station.calibr + \".cal\")\n if not os.path.exists(calfn):\n print '[{0}]: Not Found calculation paramter data, {1}'.format(fn, calfn)\n logging.warn(\n '[calculate]->[{0}]: Not Found calculation paramter data, {1}'.format(fn, calfn))\n validated = False\n\n if validated:\n tao_dir = path.join(aotdir, fn, t.strftime('%Y%m'))\n if not os.path.exists(tao_dir):\n os.makedirs(tao_dir)\n taofn = path.join(tao_dir, fn + \"_\" +\n t.strftime(\"%Y%m%d\") + '.tao')\n lat = station.lat\n lon = station.lon\n alt = station.alt\n\n spdata.cal_aot(wdir, calfn, taofn, nsufn,\n lat, lon, alt, alpha=1)\n print '[{0}] => {1}'.format(fn, taofn)\n logging.info('[calculate]->[{0}] => {1}'.format(fn, taofn))\n else:\n print '[{0}]: Abort'.format(fn)\n logging.warn('[calculate]->[{0}]: Abort'.format(fn))\n\n print 'Calculate Done!'\n logging.info('[calculate]->Calculate Done!')", "def annualized_return_risk(vals):\n P = 252\n v = np.array(vals)\n vt1 = v[1:]\n vt = v[:-1]\n rets = (vt1-vt)/vt\n \n ann_return = np.mean(rets)*P\n ann_risk = np.std(rets)*np.sqrt(P)\n \n return ann_return, ann_risk", "def insurance(self):\n insurance_cost = 0.0056 * self.input_dict['project_value_usd']\n return insurance_cost", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def calc_av_daily_return(self):\n av_return = 0.0\n total_ret = sum(self._returns)\n num_averages = len(self._returns)\n \n if num_averages > 0:\n av_return = total_ret/float(num_averages)\n \n self._av_daily_return = av_return\n return av_return", "def americanprice(self):\n self.americanpay = np.zeros((self.steps+1,self.steps+1))\n self.optionvalue = np.zeros((self.steps+1,self.steps+1))\n self.exercisevalue = 
np.zeros((self.steps+1,self.steps+1))\n self.americanpay[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n self.optionvalue[i-1][j] = (self.americanpay[i][j]*self.upprob + self.americanpay[i][j+1]*(1-self.upprob))/discount\n self.exercisevalue[i-1][j] = max(self.pricetree[i-1][j]-self.s,0.0)\n self.americanpay[i-1][j] = max(self.optionvalue[i-1][j],self.exercisevalue[i-1][j])\n return self.americanpay[0][0]", "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def calc_annual_investment(devs, param):\n\n observation_time = param[\"observation_time\"]\n interest_rate = param[\"interest_rate\"]\n q = 1 + param[\"interest_rate\"]\n\n \n # Calculate capital recovery factor\n CRF = ((q**observation_time)*interest_rate)/((q**observation_time)-1)\n \n # Calculate annuity factor for each device\n for device in devs.keys():\n \n # Get device life time\n life_time = devs[device][\"life_time\"]\n\n # Number of required replacements\n n = int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n devs[device][\"ann_factor\"] = (1 - res_value) * CRF \n else:\n devs[device][\"ann_factor\"] = ( 1 + invest_replacements - res_value) * CRF \n \n\n return devs", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self", "def Calculate(WA_HOME_folder, Basin, 
P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = 
Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = 
RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 ###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in years:\n\n 
############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet2 as Generate\n \n ######################### Set General Parameters ##############################\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n \n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, P_Product, Daily = 'y') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_LAI = Start.Download_Data.LAI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, LAI_Product) \n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Daily = os.path.join(Data_Path_P, 'Daily')\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create Rainy Days based on daily CHIRPS\n Data_Path_RD = Two.Rainy_Days.Calc_Rainy_Days(Dir_Basin, Data_Path_P_Daily, Startdate, Enddate)\n\n # Create monthly LAI\n Dir_path_LAI = os.path.join(Dir_Basin, Data_Path_LAI)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_LAI, Startdate, Enddate)\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n \n # Create monthly GPP \n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, 
Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 2)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_Prec\n\n #_______________________________Evaporation________________________________\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n #_______________________________Rainy Days_________________________________\n\n # Define info for the nc files\n info = ['monthly','days', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_RD = DC.Create_NC_name('RD', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_RD):\n\n # Get the data of Evaporation and save as nc\n DataCube_RD = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_RD, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_RD, DataCube_RD, 'RD', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_RD\n\n #_______________________________Leaf Area Index____________________________\n\n # Define info for the nc files\n info = ['monthly','m2-m-2', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_LAI = DC.Create_NC_name('LAI', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_LAI):\n\n # Get the data of Evaporation and save as nc\n DataCube_LAI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_LAI, Startdate, Enddate, 
Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_LAI, DataCube_LAI, 'LAI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_LAI\n\n ####################### Calculations Sheet 2 ##############################\n \n DataCube_I, DataCube_T, DataCube_E = Two.SplitET.ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM, Name_NC_LU, Startdate, Enddate, Simulation)\n \n ############################ Create CSV 2 ################################# \n\n Dir_Basin_CSV = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU, DataCube_I, DataCube_T, DataCube_E, Example_dataset)\n\n ############################ Create Sheet 2 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV)\n\n return()", "def compute (self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber()/100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n #set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n #Compute and append the results for each year\r\n totalInterest = 0.0\r\n for year in range (1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n #the ending balance for year 1 wil lbe the starting balance for year 2 and so on\r\n startBalance = endBalance\r\n totalInterest += interest\r\n #Append the totals for the entire period - final output for the whole thing\r\n result += \"Ending Balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n #Output the result while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline", "def annual_fee(self, working_months, year, with_bpjs=True):\n monthly_bpjs = []\n\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n # initialize variable for storing the annual bpjs\n annual_c_old_age_insurance = 0\n annual_i_old_age_insurance = 0\n annual_c_pension_insurance = 0\n annual_i_pension_insurance = 0\n annual_c_health_insurance = 0\n annual_i_health_insurance = 0\n annual_death_insurance = 0\n annual_accident_insurance = 0\n\n if with_bpjs is True:\n # only calculate bpjs if is enabled and automatically set everthing to zero when is false\n start_month = 1\n for month in range(start_month, working_months+1):\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n 
self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary, month, year)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary, month, year)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n\n monthly_bpjs.append(monthly)\n\n annual_c_old_age_insurance = annual_c_old_age_insurance \\\n + company_old_age_insurance\n\n annual_i_old_age_insurance = annual_i_old_age_insurance \\\n + individual_old_age_insurance\n\n annual_c_pension_insurance = annual_c_pension_insurance \\\n + company_pension_insurance\n\n annual_i_pension_insurance = annual_i_pension_insurance \\\n + individual_pension_insurance\n\n annual_c_health_insurance = annual_c_health_insurance \\\n + company_health_insurance\n\n annual_i_health_insurance = annual_i_health_insurance \\\n + individual_health_insurance\n\n annual_death_insurance = annual_death_insurance\\\n + death_insurance\n\n annual_accident_insurance = annual_accident_insurance\\\n + accident_insurance\n #end for\n\n annual_bpjs = {\n \"old_age_insurance\" : {\n \"company\" : annual_c_old_age_insurance,\n \"individual\" : annual_i_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : annual_c_pension_insurance,\n \"individual\" : annual_i_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : annual_c_health_insurance,\n \"individual\" : annual_i_health_insurance,\n },\n \"death_insurance\" : annual_death_insurance,\n \"accident_insurance\" : annual_accident_insurance\n }\n return annual_bpjs", "def generate_organisation_addition(self):\n\t\treserved_columns = list()\n\t\ttotal_attandance = list()\n\t\tn = list()\n\t\tfor column in self.days[0].data.columns:\n\t\t\tif column.startswith('reserved_'):\n\t\t\t\treserved_columns.append(column)\n\t\t\t\ttotal_attandance.append(0)\n\t\t\t\tn.append(0)\n\n\t\tfor day in self.days:\n\t\t\tfor index, row in day.data.iterrows():\n\t\t\t\tfor i, column in enumerate(reserved_columns):\n\t\t\t\t\tif int(row[column]) > 0:\n\t\t\t\t\t\tweekend = True\n\t\t\t\t\t\tif int(row['day_of_week']) < 5:\n\t\t\t\t\t\t\tweekend = False\n\t\t\t\t\t\ttotal_attandance[i] += row['pool'] - self.get_average_for_month_at_time(int(row['month'])-1, int(row['hour']), int(row['minute']), 
weekend)\n\t\t\t\t\t\tn[i] += 1\n\n\t\tself.org_addition = dict()\n\t\tfor i, column in enumerate(reserved_columns):\n\t\t\tif n[i] > 0:\n\t\t\t\tself.org_addition[column] = total_attandance[i]/n[i]\n\t\t\telse:\n\t\t\t\tself.org_addition[column] = 0", "def annualized_volatility(self):\n return self.daily_std() * math.sqrt(252)", "def SetupYearRecForIncomeTax(\n self, earnings=0, oas=0, gis=0, cpp=0, ei=0,\n rrsp=0, bridging=0,nonreg=0, gains=0, eoy_gains=0,\n unapplied_losses=0, rrsp_contributions=0,\n age=30, retired=False, cpi=1):\n j_canuck = person.Person(strategy=self.default_strategy)\n j_canuck.capital_loss_carry_forward = unapplied_losses\n j_canuck.age += age - world.START_AGE\n j_canuck.year += age - world.START_AGE\n j_canuck.retired = retired\n\n year_rec = utils.YearRecord()\n year_rec.is_retired = j_canuck.retired\n year_rec.year = j_canuck.year\n year_rec.incomes.append(incomes.IncomeReceipt(earnings, incomes.INCOME_TYPE_EARNINGS))\n year_rec.incomes.append(incomes.IncomeReceipt(oas, incomes.INCOME_TYPE_OAS))\n year_rec.incomes.append(incomes.IncomeReceipt(gis, incomes.INCOME_TYPE_GIS))\n year_rec.incomes.append(incomes.IncomeReceipt(cpp, incomes.INCOME_TYPE_CPP))\n year_rec.incomes.append(incomes.IncomeReceipt(ei, incomes.INCOME_TYPE_EI))\n year_rec.withdrawals.append(funds.WithdrawReceipt(nonreg, gains, funds.FUND_TYPE_NONREG))\n year_rec.withdrawals.append(funds.WithdrawReceipt(rrsp, 0, funds.FUND_TYPE_RRSP))\n year_rec.withdrawals.append(funds.WithdrawReceipt(bridging, 0, funds.FUND_TYPE_BRIDGING))\n year_rec.tax_receipts.append(funds.TaxReceipt(eoy_gains, funds.FUND_TYPE_NONREG))\n year_rec.deposits.append(funds.DepositReceipt(rrsp_contributions, funds.FUND_TYPE_RRSP))\n year_rec.cpi = cpi\n\n year_rec = j_canuck.CalcPayrollDeductions(year_rec)\n\n return (j_canuck, year_rec)", "def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]" ]
[ "0.7408717", "0.6447516", "0.61507213", "0.56581444", "0.56386214", "0.56319666", "0.56268173", "0.5622094", "0.55830747", "0.55610895", "0.55515134", "0.5541858", "0.5536161", "0.5518753", "0.54845977", "0.5477718", "0.54517406", "0.54387516", "0.54357356", "0.54199076", "0.53974795", "0.5387261", "0.5361069", "0.53575367", "0.53433263", "0.5341639", "0.5339729", "0.5320629", "0.5319645", "0.5316315" ]
0.70783615
1
Calculate annual heating savings created by the project. Attributes
def calc_annual_heating_savings (self): price = self.diesel_prices + self.cd['heating fuel premium'] maintenance = self.comp_specs['heat recovery o&m'] self.annual_heating_savings = -1 * \ (maintenance + (self.lost_heat_recovery * price))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def annualized_perf(self):\n mean_return = round(self.data.log_returns.mean() * 252, 4)\n risk = round(self.data.log_returns.std() * np.sqrt(252), 4)\n print(\"Return: {} | Risk: {}\".format(mean_return, risk))", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! 
You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def annual_fee(self, working_months, year, with_bpjs=True):\n monthly_bpjs = []\n\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n # initialize variable for storing the annual bpjs\n annual_c_old_age_insurance = 0\n annual_i_old_age_insurance = 0\n annual_c_pension_insurance = 0\n annual_i_pension_insurance = 0\n annual_c_health_insurance = 0\n annual_i_health_insurance = 0\n annual_death_insurance = 0\n annual_accident_insurance = 0\n\n if with_bpjs is True:\n # only calculate bpjs if is enabled and automatically set everthing to zero when is false\n start_month = 1\n for month in range(start_month, working_months+1):\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary, month, year)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary, month, year)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n\n monthly_bpjs.append(monthly)\n\n annual_c_old_age_insurance = annual_c_old_age_insurance \\\n + company_old_age_insurance\n\n annual_i_old_age_insurance = annual_i_old_age_insurance \\\n + individual_old_age_insurance\n\n annual_c_pension_insurance = annual_c_pension_insurance \\\n + company_pension_insurance\n\n annual_i_pension_insurance = annual_i_pension_insurance \\\n + individual_pension_insurance\n\n annual_c_health_insurance = annual_c_health_insurance \\\n + company_health_insurance\n\n annual_i_health_insurance = annual_i_health_insurance \\\n + 
individual_health_insurance\n\n annual_death_insurance = annual_death_insurance\\\n + death_insurance\n\n annual_accident_insurance = annual_accident_insurance\\\n + accident_insurance\n #end for\n\n annual_bpjs = {\n \"old_age_insurance\" : {\n \"company\" : annual_c_old_age_insurance,\n \"individual\" : annual_i_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : annual_c_pension_insurance,\n \"individual\" : annual_i_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : annual_c_health_insurance,\n \"individual\" : annual_i_health_insurance,\n },\n \"death_insurance\" : annual_death_insurance,\n \"accident_insurance\" : annual_accident_insurance\n }\n return annual_bpjs", "def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self", "def annual_update(self, state, weather, time):\n soil = state.soil\n crop_type = state.crop.current_crop\n animal_management = state.animal_management\n feed = state.feed\n\n soil.annual_mass_balance()\n\n for variable in self.annual_variables:\n self.annual_variables[variable][2] = \\\n eval(self.annual_variables[variable][0], globals(), locals())", "def __init__(self, total_cost, ann_rate, ann_salary, portion_saved):\r\n\t\tself.total_cost = total_cost\r\n\t\tself.portion_down_payment = total_cost*0.25\r\n\t\tself.ann_rate = ann_rate\r\n\t\tself.monthly_salary = ann_salary/12\r\n\t\tself.portion_saved = portion_saved\r\n\t\tself.current_savings = [0.0,]\r\n\t\tself.months = 0\r\n\t\tself.new_saving = 0", "def annual_energy(self):\n return self['annual_energy']", "def annualized_return_risk(vals):\n P = 252\n v = np.array(vals)\n vt1 = v[1:]\n vt = v[:-1]\n rets = (vt1-vt)/vt\n \n ann_return = np.mean(rets)*P\n ann_risk = np.std(rets)*np.sqrt(P)\n \n return ann_return, ann_risk", "def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense", "def monthly_fee(self):\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n 
self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n return monthly", "def genMarketStat(self):\n myMarketStat = marketstat.MarketStat({'id':str(self.currentRound)})\n self.marketStats[str(self.currentRound)] = myMarketStat\n # set avg price to last rounds market avg price\n if self.currentRound > 1:\n lastMarketStat = self.marketStats[str(self.currentRound-1)]\n myMarketStat.avgSoldAL = lastMarketStat.avgSoldAL\n myMarketStat.avgSoldEC = lastMarketStat.avgSoldEC\n myMarketStat.avgSoldIA = lastMarketStat.avgSoldIA", "def compute (self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber()/100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n #set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n #Compute and append the results for each year\r\n totalInterest = 0.0\r\n for year in range (1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n #the ending balance for year 1 wil lbe the starting balance for year 2 and so on\r\n startBalance = endBalance\r\n totalInterest += interest\r\n #Append the totals for the entire period - final output for the whole thing\r\n result += \"Ending Balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n #Output the result while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def years_to_pay(self) -> float:\n return round(self.term / self.term_multiplier * self.n_periods / 12, 1)", "def average(self, returns):\r\n return returns.mean() * self.day", "def annualized_volatility(self):\n return self.daily_std() * math.sqrt(252)", "def annual_summary(self):\n \n #Initialize dict with info about all of year's storms\n hurdat_year = {'id':[],'operational_id':[],'name':[],'max_wspd':[],'min_mslp':[],'category':[],'ace':[]}\n \n #Search for corresponding entry in keys\n count_ss_pure = 0\n count_ss_partial = 0\n iterate_id = 1\n for key in self.dict.keys():\n\n #Retrieve info about storm\n temp_name = self.dict[key]['name']\n temp_vmax = np.array(self.dict[key]['vmax'])\n temp_mslp = np.array(self.dict[key]['mslp'])\n temp_type = np.array(self.dict[key]['type'])\n temp_time = np.array(self.dict[key]['date'])\n temp_ace = self.dict[key]['ace']\n\n #Get indices of all tropical/subtropical time steps\n idx = np.where((temp_type == 'SS') | (temp_type == 'SD') | (temp_type == 'TD') | (temp_type == 'TS') | (temp_type == 'HU'))\n\n #Get times during existence of trop/subtrop storms\n if len(idx[0]) == 0: continue\n trop_time = temp_time[idx]\n if 'season_start' not in hurdat_year.keys():\n hurdat_year['season_start'] = trop_time[0]\n hurdat_year['season_end'] = trop_time[-1]\n\n #Get max/min values and 
check for nan's\n np_wnd = np.array(temp_vmax[idx])\n np_slp = np.array(temp_mslp[idx])\n if len(np_wnd[~np.isnan(np_wnd)]) == 0:\n max_wnd = np.nan\n max_cat = -1\n else:\n max_wnd = int(np.nanmax(temp_vmax[idx]))\n max_cat = convert_category(np.nanmax(temp_vmax[idx]))\n if len(np_slp[~np.isnan(np_slp)]) == 0:\n min_slp = np.nan\n else:\n min_slp = int(np.nanmin(temp_mslp[idx]))\n\n #Append to dict\n hurdat_year['id'].append(key)\n hurdat_year['name'].append(temp_name)\n hurdat_year['max_wspd'].append(max_wnd)\n hurdat_year['min_mslp'].append(min_slp)\n hurdat_year['category'].append(max_cat)\n hurdat_year['ace'].append(temp_ace)\n hurdat_year['operational_id'].append(self.dict[key]['operational_id'])\n \n #Handle operational vs. non-operational storms\n\n #Check for purely subtropical storms\n if 'SS' in temp_type and True not in np.isin(temp_type,['TD','TS','HU']):\n count_ss_pure += 1\n\n #Check for partially subtropical storms\n if 'SS' in temp_type:\n count_ss_partial += 1\n\n #Add generic season info\n hurdat_year['season_storms'] = len(hurdat_year['name'])\n narray = np.array(hurdat_year['max_wspd'])\n narray = narray[~np.isnan(narray)]\n hurdat_year['season_named'] = len(narray[narray>=34])\n hurdat_year['season_hurricane'] = len(narray[narray>=65])\n hurdat_year['season_major'] = len(narray[narray>=100])\n hurdat_year['season_ace'] = np.sum(hurdat_year['ace'])\n hurdat_year['season_subtrop_pure'] = count_ss_pure\n hurdat_year['season_subtrop_partial'] = count_ss_partial\n \n #Return object\n return hurdat_year", "def calculate_profit(self):", "def __init__(self):\n self.annual_interest_rate = 10.0 / 100.0\n self.initial_loan_date = date(2014, 12, 1)\n self.currency = 'HKD'\n self.total_loan_amount = Money('100000.00', 'HKD')\n self.final_payment_date = self.initial_loan_date + \\\n relativedelta(years=1)", "def calcAnnualWeightedAveInsolation(latitude, slope, azimuth):\n\tdf = calcTotalInsolation(latitude, slope, azimuth)\n\treturn np.dot(\n\t\tnp.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]),\n\t\tdf['insolation_tilted']\n\t\t) / 365.0", "def batting_average(df,start_year,end_year,bat_met,player_name):\n\n base_fields = ['H','AB']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)", "def yearlyDepreciation():\n return .10", "def averageTime(self):\n \n pass", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def avg_annual_returns(end_of_year_returns, mstat):\n\n # imports mean stats\n from scipy.stats import mstats\n\n # converts returns dict to an array (in decimal fmt)\n returns_arr = np.array(list(end_of_year_returns.values()))/100\n\n if mstat == 'geometric':\n\n # calculates the geometric mean\n gmean_returns = (mstats.gmean(1 + returns_arr) - 1)*100\n\n return round(gmean_returns, 2)\n\n if mstat == 'arithmetic':\n\n # calculates the arithmetic mean\n mean_returns = np.mean(returns_arr)\n\n return round(mean_returns, 2)", "def annual_series(events):\n annually_series = pd.Series(data=events[COL.MAX_OVERLAPPING_SUM].values,\n index=events[COL.START].values,\n name=COL.MAX_OVERLAPPING_SUM).resample('AS').max()\n annually_series = annually_series.sort_values(ascending=False).reset_index(drop=True)\n\n mean_sample_rainfall = 
annually_series.mean()\n sample_size = annually_series.count()\n\n x = -np.log(np.log((sample_size + 0.2) / (sample_size - (annually_series.index.values + 1.0) + 0.6)))\n x_mean = x.mean()\n\n w = ((x * annually_series).sum() - sample_size * mean_sample_rainfall * x_mean) / \\\n ((x ** 2).sum() - sample_size * x_mean ** 2)\n u = mean_sample_rainfall - w * x_mean\n\n return {'u': u, 'w': w}", "def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]", "def runRandomEntryStrat(self):\n start, end = self.randomDays()\n \n gain = (self.df.adj_close[end] - getInfl(self.df.adj_close[start], start.year, end.year)) / \\\n getInfl(self.df.adj_close[start], start.year, end.year)\n #if gain > 6:\n # print \"Windfall: \", start, end, gain\n return gain" ]
[ "0.7293272", "0.65816814", "0.64126164", "0.5913036", "0.59082484", "0.55546623", "0.55409116", "0.5488795", "0.54877865", "0.5483977", "0.54786175", "0.5446557", "0.54036194", "0.5401703", "0.53986675", "0.53903943", "0.53836673", "0.538216", "0.537716", "0.5362062", "0.5353387", "0.5331724", "0.5327209", "0.53181463", "0.530514", "0.52991915", "0.5299093", "0.5298179", "0.5289651", "0.52871555" ]
0.69910264
1
Get total fuel saved. Returns float the total fuel saved in gallons
def get_fuel_total_saved (self): #~ print self.lost_heat_recovery #~ print self.intertie_offset_generation_fuel_used #~ print self.pre_intertie_generation_fuel_used #~ gen_eff = self.cd["diesel generation efficiency"] #~ fuel_used = self.intertie_offset_generation / gen_eff generation_diesel_reduction = \ np.array(self.pre_intertie_generation_fuel_used\ [:self.actual_project_life]) return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\ generation_diesel_reduction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def get_total_energy(parameters):\n return orm.Float(parameters.get_attribute('energy'))", "def total_energy(self):\n return self._total_energy", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def calculate_total_fuel(filename):\n return sum([calculate_fuel_from_mass(mass) for mass in read_mass_from_file(filename)])", "def get_total_haberes(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_HABERES).replace(\".\", \"\"))", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def total(self) -> float:\n return self._total", "def cargo_fuel(self):\n return self._cargo_fuel", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def get_total_supply() -> int:\n return total_supply", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def fuel_cost(self, update=False):\n if update or self._dfs['fuel_cost'] is None:\n self._dfs['fuel_cost'] = pudl.analysis.mcoe.fuel_cost(self)\n return self._dfs['fuel_cost']", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def total(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.total", "def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)", "def total(self):\n\t\treturn self._total", "def GetTotal(self):\n return(self.total)", "def bus_line_total_miles(self) -> float:\n return self.dss_obj.BUSF(11, 0)", "def get_total_elle(self):\r\n \r\n return str(round(self._total_elle, 2))", "def get_total(self):\r\n \r\n return str(round(self._total, 2))", "def totalFireBonusDamage(self):\n return int(self._baseFireBonusDamage +\n self._equipmentFireBonusDamage +\n self._statusFireBonusDamage)", "def get_total(self):\n\n # Total sum\n self.sum = 0.00\n\n # Determine which Check buttons are selected\n # and add the charges to find the total\n if self.check_1.get() == 1:\n self.sum += 30.00\n if self.check_2.get() == 1:\n self.sum 
+= 20.00\n if self.check_3.get() == 1:\n self.sum += 40.00\n if self.check_4.get() == 1:\n self.sum += 100.00\n if self.check_5.get() == 1:\n self.sum += 35.00\n if self.check_6.get() == 1:\n self.sum += 200.00\n if self.check_7.get() == 1:\n self.sum += 20.00\n\n # Convert the sum to string\n # and store in StringVar object\n # to automatically update the total_val label\n self.sum_str.set(self.sum)", "def calculate_total_fuel_recursively(filename):\n return sum([calculate_fuel_recursively(mass) for mass in read_mass_from_file(filename)])", "def calc_total_fuel(mass):\n fuel = fuel_for_mass(mass)\n\n if fuel < 0:\n return 0\n\n added_fuel = calc_total_fuel(fuel)\n return fuel + added_fuel" ]
[ "0.824919", "0.67309505", "0.67212445", "0.6641931", "0.6640454", "0.65572923", "0.64542156", "0.6337531", "0.6322669", "0.6316941", "0.6301916", "0.6281241", "0.62791896", "0.6275253", "0.6265616", "0.61895245", "0.6137895", "0.61320716", "0.61031145", "0.6087333", "0.6077702", "0.60732543", "0.60173064", "0.6013059", "0.5992951", "0.5992199", "0.59789133", "0.5944898", "0.5942518", "0.5939736" ]
0.7722197
1
Get your current running jobs on the Sherlock cluster
def running_jobs_sherlock(): user = os.environ['USER'] return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "async def get_jobs(): \n return mngr.getAllJobs()", "def jobs(self):\n return self.get_jobs()", "def jobs(self):\n return self._jobs", "def _get_jobs():\n return _get_bigquery_service().jobs()", "def get_running_jobs(api_instance):\n namespace = \"default\"\n try:\n api_response = api_instance.list_namespaced_job(namespace)\n except ApiException as e:\n logger.exception(\"Exception while receiving running Jobs{}\".format(e))\n return api_response", "def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)", "def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())", "def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()", "def get_jobs(self):\n return list(self._jobs.values())", "def job(job_name):\n ClientID = Job.get_client_id(job_name)\n return tasks_for_client_job(ClientID, job_name)", "def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)", "def getWorkers(self):\n return self.workers", "def current_job(self):\n assert(ExecutorThread.executor_object is not None)\n return self.__job", "def get_waiting_jobs(self):\n return []", "def ListJobs(self, token=None):\n return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()", "def list(self):\n self.background_scheduler.print_jobs()", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "def jobs(self):\n raise NotImplementedError()", "def list_jobs(arn=None, nextToken=None):\n pass", "def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]", "def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()", "def get_executed_jobs(self):\n with self.__lock:\n return list(self.__executed_jobs)", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def workers(self):\n return self.worker_list", "def _get_njobs_in_queue(self, username):" ]
[ "0.7408034", "0.72631824", "0.7173004", "0.6856391", "0.6788189", "0.6787631", "0.6698274", "0.6687841", "0.6678214", "0.6678214", "0.66645783", "0.6657631", "0.66119254", "0.65800935", "0.6543887", "0.65284514", "0.64695036", "0.6460383", "0.64549667", "0.64276236", "0.6394917", "0.6378155", "0.6356756", "0.63484544", "0.6314699", "0.6306425", "0.6291282", "0.6276802", "0.62740475", "0.6262553" ]
0.76263565
0
simply sends a message to the client address specified.
def send_net_message_client(message, client_addr): serverSocket.sendto(message, client_addr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg_client(msg, client):\r\n client.send(bytes(str(msg), \"utf-8\"))", "def sendToClient(self, client_id, message_type, message):\n if not client_id in self.client_to_socket:\n raise ValueError(\"The client with id {} does not exist\".format(client_id))\n self.sendToSocket(self.client_to_socket[client_id],message_type,message)", "def send_message(self, client, message):\n self.stdout.write(message)\n client.send(f'HTTP/1.1 200 OK\\r\\n\\r\\n{message}'.encode(\"utf-8\"))\n client.close()", "def send_msg(self, payload, to_addr, reply_to_addr):\n self._client.send_msg(payload, to_addr, reply_to_addr)", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "async def send_to_user(self, user: User, msg: Msg, address: str = None):\n if address is None:\n address = user.current_address\n\n await self.send(msg, address)", "def send_stun(self, message, addr):\n logger.debug('%s > %s %s', self, addr, message)\n self.transport.sendto(bytes(message), addr)", "def client():\n host = '127.0.0.1'\n port = 8125\n sock = socket.socket(\n socket.AF_INET,\n socket.SOCK_DGRAM)\n sock.connect((host, port))\n def send(msg):\n sock.sendall(msg)\n return send", "def client(ip, port, message):\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n try:\n sock.sendall(bytes(message, 'ascii'))\n response = str(sock.recv(BUF_SIZE), 'ascii')\n print(\"Client received: {}\".format(response))\n finally:\n sock.close()", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send_message(self, data):\n header, data = format_msg(data)\n self.server_socket.sendto(header, self.client_address)\n self.server_socket.sendto(data, self.client_address)", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send(self, message):\n self.sock.send(message)", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send(self, client, data):\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send(self, msg):\n self.message('Me', msg)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def send(msg, dest=None):", "def send_to_client(self, ip_addr: str, data, compress=True):\n if self.host is not None:\n try:\n client_conn_obj = self.host.lookup_client(ip_addr)\n self.host.callback_client_send(client_conn_obj, data, compress)\n except MastermindErrorSocket as e:\n raise MastermindErrorSocket(e)\n except Networking.Host.ClientNotFoundException:\n logging.error(f\"Client at {ip_addr} is not connected\")\n else:\n raise MastermindErrorServer(\"Server is not 
available\")", "def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send(self, message_body: str, target: str):\n\t\tif target == 'local':\n\t\t\tself.client_process(message_body)\n\t\telse:\n\t\t\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n\t\t\t\ttry:\n\t\t\t\t\tsock.settimeout(1)\n\t\t\t\t\tsock.connect((target, self.channel_port))\n\t\t\t\t\tsock.send(message_body.encode())\n\t\t\t\texcept socket.timeout:\n\t\t\t\t\tself.registry.delete_ip(target)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, address: Address, packet: StrictPacket):\n with self._clientDictLock:\n self._clients[address].send(packet)" ]
[ "0.7387198", "0.6993373", "0.6930433", "0.69239", "0.6904713", "0.68367815", "0.6811525", "0.67738485", "0.6729188", "0.6701323", "0.6656938", "0.6642157", "0.66237146", "0.6605746", "0.6597053", "0.6596277", "0.6579792", "0.65749264", "0.6560786", "0.6554997", "0.65531754", "0.65393853", "0.65379685", "0.6532616", "0.65285164", "0.65186024", "0.6511578", "0.65010464", "0.6479287", "0.6472287" ]
0.82171863
0
Sets the payee_wallet_id of this EscrowTransactionResponse.
def payee_wallet_id(self, payee_wallet_id): self._payee_wallet_id = payee_wallet_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payer_wallet_id(self, payer_wallet_id):\n\n self._payer_wallet_id = payer_wallet_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def payee_name(self, payee_name):\n\n self._payee_name = payee_name", "def payee_state(self, payee_state):\n\n self._payee_state = payee_state", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def earnings_rate_id(self, earnings_rate_id):\n\n self._earnings_rate_id = earnings_rate_id", "def get_wallet(self, walletId):\n return", "def __init__(self, id=None, payee_wallet_id=None, payer_wallet_id=None, amount=None, withdrawn=None, escrow_address=None, record_status=None, create_date=None, update_date=None): # noqa: E501 # noqa: E501\n\n self._id = None\n self._payee_wallet_id = None\n self._payer_wallet_id = None\n self._amount = None\n self._withdrawn = None\n self._escrow_address = None\n self._record_status = None\n self._create_date = None\n self._update_date = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if payee_wallet_id is not None:\n self.payee_wallet_id = payee_wallet_id\n if payer_wallet_id is not None:\n self.payer_wallet_id = payer_wallet_id\n if amount is not None:\n self.amount = amount\n if withdrawn is not None:\n self.withdrawn = withdrawn\n if escrow_address is not None:\n self.escrow_address = escrow_address\n if record_status is not None:\n self.record_status = record_status\n if create_date is not None:\n self.create_date = create_date\n if update_date is not None:\n self.update_date = update_date", "def response_id(self, response_id):\n\n self._response_id = response_id", "def payee(self, payee_id: str):\n return get_from_list(self.payees, \"id\", payee_id)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n for atm in self.iter_alt_loc():\n atm.chain_id = chain_id", "def committee_id(self, committee_id):\n\n self._committee_id = committee_id", "def committee_id(self, committee_id):\n\n self._committee_id = committee_id", "def committee_id(self, committee_id):\n\n self._committee_id = committee_id", "def set_AWSMerchantId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMerchantId', value)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for frag in self.iter_fragments():\n frag.set_chain_id(chain_id)", "def payment_id(self, payment_id):\n\n self._payment_id = payment_id", "def envelope_id(self, envelope_id):\n\n self._envelope_id = envelope_id", "def transaction_id(self, transaction_id):\n if transaction_id is None:\n raise ValueError(\"Invalid value for `transaction_id`, must not be `None`\") # noqa: E501\n\n self._transaction_id = transaction_id", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for atm in self.iter_atoms():\n atm.set_chain_id(chain_id)", "def pay_fee(self, fee):\n self.wallet -= fee", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def set_received_txn_response(self, transaction_id, origin, code, response_dict):\n\n return self.db.simple_insert(\n table=\"received_transactions\",\n 
values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\": code,\n \"response_json\": db_binary_type(encode_canonical_json(response_dict)),\n \"ts\": self._clock.time_msec(),\n },\n or_ignore=True,\n desc=\"set_received_txn_response\",\n )", "def transaction_amount(self, transaction_amount):\n\n self._transaction_amount = transaction_amount", "def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id" ]
[ "0.7311907", "0.573136", "0.54004514", "0.53062975", "0.53006345", "0.5269258", "0.5153421", "0.5153421", "0.4994353", "0.4974469", "0.49704325", "0.49286303", "0.49132255", "0.4910477", "0.4855245", "0.48479044", "0.48479044", "0.48479044", "0.48202246", "0.47650665", "0.47486928", "0.46032292", "0.46005285", "0.45976615", "0.45579433", "0.45507455", "0.4522407", "0.45100296", "0.44900107", "0.44844285" ]
0.81508285
0
Sets the payer_wallet_id of this EscrowTransactionResponse.
def payer_wallet_id(self, payer_wallet_id): self._payer_wallet_id = payer_wallet_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payee_wallet_id(self, payee_wallet_id):\n\n self._payee_wallet_id = payee_wallet_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def response_id(self, response_id):\n\n self._response_id = response_id", "def payment_id(self, payment_id):\n\n self._payment_id = payment_id", "def get_wallet(self, walletId):\n return", "def seller(self, seller):\n\n self._seller = seller", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def payee_name(self, payee_name):\n\n self._payee_name = payee_name", "def merchant_id(self, merchant_id):\n if merchant_id is None:\n raise ValueError(\"Invalid value for `merchant_id`, must not be `None`\") # noqa: E501\n\n self._merchant_id = merchant_id", "def driver_id(self, driver_id):\n\n self._driver_id = driver_id", "def buyer(self, buyer):\n\n self._buyer = buyer", "def survey_response_id(self, survey_response_id):\n\n self._survey_response_id = survey_response_id", "def set_AWSMerchantId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMerchantId', value)", "def transaction_id(self, transaction_id):\n if transaction_id is None:\n raise ValueError(\"Invalid value for `transaction_id`, must not be `None`\") # noqa: E501\n\n self._transaction_id = transaction_id", "def earnings_rate_id(self, earnings_rate_id):\n\n self._earnings_rate_id = earnings_rate_id", "def __init__(self, id=None, payee_wallet_id=None, payer_wallet_id=None, amount=None, withdrawn=None, escrow_address=None, record_status=None, create_date=None, update_date=None): # noqa: E501 # noqa: E501\n\n self._id = None\n self._payee_wallet_id = None\n self._payer_wallet_id = None\n self._amount = None\n self._withdrawn = None\n self._escrow_address = None\n self._record_status = None\n self._create_date = None\n self._update_date = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if payee_wallet_id is not None:\n self.payee_wallet_id = payee_wallet_id\n if payer_wallet_id is not None:\n self.payer_wallet_id = payer_wallet_id\n if amount is not None:\n self.amount = amount\n if withdrawn is not None:\n self.withdrawn = withdrawn\n if escrow_address is not None:\n self.escrow_address = escrow_address\n if record_status is not None:\n self.record_status = record_status\n if create_date is not None:\n self.create_date = create_date\n if update_date is not None:\n self.update_date = update_date", "def vendor_id(self, vendor_id):\n\n self._vendor_id = vendor_id", "def set_received_txn_response(self, transaction_id, origin, code, response_dict):\n\n return self.db.simple_insert(\n table=\"received_transactions\",\n values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\": code,\n \"response_json\": db_binary_type(encode_canonical_json(response_dict)),\n \"ts\": self._clock.time_msec(),\n },\n or_ignore=True,\n desc=\"set_received_txn_response\",\n )", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def payee_state(self, payee_state):\n\n self._payee_state = payee_state", "def bank_id(self, bank_id):\n\n self._bank_id = bank_id", "def set_player_id(self, player_id):\n 
self.player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def vendorid(self, vendorid):\n\n self._vendorid = vendorid", "def set_up_trader_id(trader_dict):\n trader_id = 0\n for name, trader in trader_dict.items():\n trader.trader_id = str(trader_id)\n trader_id += 1" ]
[ "0.7696529", "0.60767055", "0.56332916", "0.56299317", "0.5272401", "0.5272401", "0.5213051", "0.5161533", "0.51283133", "0.5097511", "0.4929194", "0.48645753", "0.48536748", "0.48232916", "0.4775775", "0.47211295", "0.47165722", "0.4694163", "0.46884617", "0.4682201", "0.46582586", "0.4654451", "0.46310508", "0.46160367", "0.45630452", "0.45415622", "0.45124477", "0.45124477", "0.45003188", "0.44916996" ]
0.84063405
0
Sets the withdrawn of this EscrowTransactionResponse.
def withdrawn(self, withdrawn): self._withdrawn = withdrawn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def withdraw(self, responder):\n self._apply_decision(self.Status.WITHDRAWN, responder)", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def withdraw(self, amount):\n\n print(\"\\nWithdrawal - {self.name}\".format(self=self))\n\n # retrieves the available balance in the account\n availableBalance = self.getAvailableBalance()\n \n # checks for negative amount value \n if amount < 0:\n print(\"Cannot withdraw £{0:.2f}\".format(amount))\n print(\"Deposit amount cannot be a negative value.\")\n\n # checks whether amount requested is greater than the available balance\n elif amount > availableBalance:\n print(\"Cannot withdraw £{0:.2f}\".format(amount))\n print(\"Insufficient funds.\")\n\n # subtracts amount from account balance\n else:\n self.balance -= amount\n print(\"{0} has withdrew £{1:.2f}. New balance is £{2:.2f}\".format(self.name, amount, self.balance))", "def withdraw(self, amount, trigger_transaction, trans=None):\n\n #\n # validates the amount is positive\n self.validate_amount(amount)\n\n #\n # Validate the user has the amount for the withdraw\n if not self.check_sufficient_funds(amount):\n raise OverdraftException(self.user.username)\n\n #\n # creates the transaction\n category = TransactionType.objects.get(pk=TransactionTypeConstants.BonusCashWithdraw.value)\n\n #\n # makes the amount negative because it is a withdrawal\n self.create(category, -amount, trans)\n self.transaction_detail.trigger_transaction = trigger_transaction\n self.transaction_detail.save()\n\n Logger.log(ErrorCodes.INFO,\"Bonus Cash Withdraw\", self.user.username+\" withdrew \"+str(amount)+\" \"+self.accountName+\" from their account.\")", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def withdraw(self, account_number: int, withdrawal: float) -> bool: \n if (withdrawal <= self._accounts[account_number][1]):\n self._accounts[account_number][1] -= withdrawal\n return True\n else:\n return False", "def Withdrawn(self, default=[None]):\n return self.data.get('metadata', {}).get('withdrawn', default)", "def post_cancel_withdraw(self, withdraw_id: 'int') -> int:\n params = {\n \"withdraw-id\": withdraw_id\n }\n\n from huobi.service.wallet.post_cancel_withdraw import PostCancelWithdrawService\n return PostCancelWithdrawService(params).request(**self.__kwargs)", "def withdraw(self, amount):\n self.transactions += [('withdraw', amount)]\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def withdraw(self, request, *args, **kwargs):\n account = self.get_object()\n account_serializer = self.get_serializer()\n value = request.data.get(\"valor\", None)\n\n try:\n withdraw_result = account_serializer.withdraw(value, account)\n except ValueError as ve:\n return Response({\"detail\": \"Could not withdraw: {0}.\".format(ve),\n \"status_code\": 
status.HTTP_400_BAD_REQUEST}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(withdraw_result)", "def withdrawal(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(-amount)\n else:\n print('withdrawal error')", "def withdraw(self, amount):\n self.withdrw = amount\n \n if (self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def withdraw(self, amount):\n self.deposit(-amount)", "def withdraw(self, cr, uid, ids, amount, context=None):\n record = self.browse(cr, uid, ids, context=context)[0]\n current_amount = record.current_amount\n withdraw_amount = record.withdraw_amount\n if amount > current_amount:\n raise osv.except_osv(_('Constraint Error'), _(\"The the amount is greater than the Current Money!\"))\n\n record.write({'current_amount':current_amount - amount,\n 'withdraw_amount':withdraw_amount + amount })\n return True", "def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance", "def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')", "def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,\n auto_commit: bool = True):\n row = (withdraw_id, tx_id, apply_time, asset, amount, fee)\n self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)", "def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def withdraw(self, amount):\n self.balance -= amount", "def test_early_out_withdrawal(self):\n with FakeClock(TIME_1):\n response = self.send_post(\"Participant\", self.participant)\n participant_id = response[\"participantId\"]\n response[\"providerLink\"] = [self.provider_link_2]\n response[\"withdrawalStatus\"] = \"EARLY_OUT\"\n response[\"withdrawalTimeStamp\"] = 1563907344169\n response[\"suspensionStatus\"] = \"NOT_SUSPENDED\"\n response[\"withdrawalReason\"] = \"TEST\"\n response[\"withdrawalReasonJustification\"] = \"This was a test account.\"\n path = \"Participant/%s\" % participant_id\n self.send_put(path, response, headers={\"If-Match\": 'W/\"1\"'})\n participant = self.send_get(path)\n self.assertEqual(participant[\"withdrawalStatus\"], \"EARLY_OUT\")\n self.assertEqual(participant[\"withdrawalTime\"], '2018-01-01T00:00:00')\n self.assertEqual(participant[\"withdrawalAuthored\"], '2019-07-23T18:42:24')", "def register_withdraw(self, withdraw_intent): \n if withdraw_intent > 0:\n self.teo.register_withdraw(self, withdraw_intent)", "def withdraw(self, amount):\n message = self.account.withdraw(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"", "def Withdrawal(self):\n self.amount = (int)(raw_input (\" Enter your withdrawal amount \"))\n return self.amount", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def test_withdraw_success(client):\n usd = Asset.objects.create(\n code=\"USD\",\n 
issuer=Keypair.random().public_key,\n sep24_enabled=True,\n withdrawal_enabled=True,\n distribution_seed=Keypair.random().secret,\n )\n response = client.post(\n WITHDRAW_PATH, {\"asset_code\": usd.code, \"amount\": \"100\"}, follow=True\n )\n content = response.json()\n assert content[\"type\"] == \"interactive_customer_info_needed\"\n assert \"100\" in content[\"url\"]\n assert content.get(\"id\")\n\n t = Transaction.objects.filter(id=content.get(\"id\")).first()\n assert t\n assert t.stellar_account == \"test source address\"\n assert t.account_memo is None\n assert t.muxed_account is None\n assert t.asset.code == usd.code\n assert t.protocol == Transaction.PROTOCOL.sep24\n assert t.kind == Transaction.KIND.withdrawal\n assert t.status == Transaction.STATUS.incomplete\n assert t.receiving_anchor_account is None\n assert t.memo is None\n assert t.memo_type == Transaction.MEMO_TYPES.hash\n assert t.from_address is None", "async def legwithdraw(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"leg_session_withdraw\")\n\n if new_value:\n message = f\":white_check_mark: You will now receive DMs when you are a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} and someone withdraws their Bill or Motion. \" \\\n f\"Note that you will never get a DM when a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} is the one withdrawing.\"\n\n else:\n message = f\":white_check_mark: You will no longer receive DMs when you are a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} and someone withdraws their Bill or Motion.\"\n\n await ctx.send(message)", "def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance", "def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance" ]
[ "0.5891516", "0.5690135", "0.5676766", "0.5597701", "0.55876815", "0.55855316", "0.55006677", "0.5495458", "0.5472348", "0.54654574", "0.5433065", "0.53317225", "0.5330039", "0.5313943", "0.5277022", "0.52171", "0.51859295", "0.51577157", "0.5134747", "0.5110238", "0.5054822", "0.5024524", "0.5004318", "0.5001757", "0.5001067", "0.4923886", "0.49143445", "0.48868704", "0.48662046", "0.48662046" ]
0.78851956
0
Sets the escrow_address of this EscrowTransactionResponse.
def escrow_address(self, escrow_address): self._escrow_address = escrow_address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_address(self, address):\n pass", "def address(self, address: object):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def set_address(self, address):\n self._java_ref.setAddress(address)", "def _set_address(self, v, load=False):\n try:\n t = YANGDynClass(v,base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"address must be of a type compatible with base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def address(self, address: str):\n\n self._address = address", "def set_contract_addr(self, addr):\n\t\tself.contract_addr = addr\n\t\tself._bank_inst = self._w3.eth.contract(\n\t\t\taddress=self.contract_addr,\n\t\t\tabi=self._iface[\"abi\"],\n\t\t)", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\")\n\n self._address = address", "def set_address(self, address):\n if address == \"\":\n self.address = Address(\"\", \"\", \"\")\n else:\n self.address = address", "def amended_address(self, amended_address):\n\n self._amended_address = amended_address", "def set_address(self,address): \n new_address = self._format_address(address)\n self.rs485.write_command('#00?8 {}'.format(new_address))\n self.rs485.clear_buffers()\n time.sleep(0.2)", "def setEthaddr(self):\n\t\tself.ethaddr = self.settings.getKeyValue('ethaddr')\n\t\tself.socket.send('setenv ethaddr ' + self.ethaddr+'\\r', 1)\n\t\treturn None", "def set_complete_address(self, complete_address):\n self.complete_address = complete_address", "def address(self, new_address):\n house_num, street_name, apt_num = new_address\n self._address.house_num = house_num\n self._address.street_name = street_name\n self._address.apt_num = apt_num", "def address_id(self, address_id):\n\n self._address_id = address_id", "def address_id(self, address_id):\n\n self._address_id = address_id", "def set_address(self, new_address, ):\n self.address.append(new_address)\n self.save()", "def setSicxAddress(self, _address: Address) -> None:\n self._sICX_address.set(_address)", "def set_address(self, address, defer=False):\n\n # The MAXUSB chip handles this for us, so we don't need to do anything.\n pass", "def set_remit_to_address(self, remit_to_address):\n self.remit_to_address = remit_to_address", "def setData(self,address):\n self.data = _pack_address(address)", "def rowguid(self, rowguid):\n\n self._rowguid = rowguid", "def address(self, address):\n if self.local_vars_configuration.client_side_validation and address is None: # noqa: E501\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address" ]
[ "0.60267246", "0.5820161", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.56964433", "0.5556428", "0.55402744", "0.55071515", "0.5419375", "0.5411141", "0.54009694", "0.538831", "0.5359071", "0.53501517", "0.5252209", "0.5244996", "0.5244996", "0.5235547", "0.52323544", "0.5160443", "0.5158121", "0.51524144", "0.51408887", "0.5102618" ]
0.83312446
0
Sets the record_status of this EscrowTransactionResponse.
def record_status(self, record_status): self._record_status = record_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def line_status(self, line_status):\n\n self._line_status = line_status", "def account_status(self, account_status):\n\n self._account_status = account_status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def set_recording(self, recording):\n self.record_states = recording", "def update_record_status(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}/status\"\n record_data = {\n \"data\": [\n {\n \"ids\": [record.record_id],\n \"Candidate_Status\": record.status,\n \"comments\": record.comments\n }\n ],\n \"trigger\":[record.trigger]\n }\n response = util.rest(\"PUT\",endpoint,access_token,record_data)\n return json.loads(response.text)", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status" ]
[ "0.5884911", "0.5822956", "0.5775433", "0.56188405", "0.56188405", "0.56188405", "0.561294", "0.55961376", "0.5581018", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998" ]
0.783832
0
Sets the update_date of this EscrowTransactionResponse.
def update_date(self, update_date): self._update_date = update_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n if updated_date is None:\n raise ValueError(\"Invalid value for `updated_date`, must not be `None`\") # noqa: E501\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n if updated_date is None:\n raise ValueError(\"Invalid value for `updated_date`, must not be `None`\") # noqa: E501\n if updated_date is not None and len(updated_date) < 1:\n raise ValueError(\"Invalid value for `updated_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._updated_date = updated_date", "def update(self, date):\r\n self.date = date", "def updated(self, updated: datetime):\n\n self._updated = updated", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def set_date(self, date):\n self.date = date\n return", "def set_date(self, date):\n self.date = date", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def set_date(self, date):\n self.data['date'] = date", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def update_time(self, update_time):\n\n self._update_time = update_time", "def set_end_date(self, date):\n pass", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date" ]
[ "0.72074246", "0.72074246", "0.7175995", "0.6495363", "0.62919956", "0.6009002", "0.57752734", "0.5506892", "0.5506892", "0.5489573", "0.5442477", "0.54176337", "0.54176337", "0.54176337", "0.5408546", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5333897", "0.5304892", "0.53018886", "0.5301887", "0.5301887" ]
0.7807129
1
Helper to log the failed SQS records metric
def _log_failed(cls, count): MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_failures(self):\n for exception in self.queue_manager.failure_descriptions():\n self.logger.info(exception)", "def test_failed_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=0)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=1)\n self.assertEqual(sms.logs.count(), 1)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=2)\n self.assertEqual(sms.logs.count(), 1)", "def append_record_failure():\n\t\tpass", "def _check_failures(self, response, batch=None):\n if not response.get('Failed'):\n return 0 # nothing to do here\n\n LOGGER.error('The following records failed to put to queue %s', self.queue.url)\n\n for failure in response['Failed']:\n # Pull out the record that matches this ID\n record = self._extract_message_by_id(batch, failure['Id']) if batch else None\n LOGGER.error(self._format_failure_message(failure, record=record))\n\n failed = len(response.get('Failed', []))\n self._log_failed(failed)\n\n # Raise an exception if this is the fault of the sender (us)\n if any(result['SenderFault'] for result in response['Failed']):\n raise SQSClientError('Failed to send records to SQS:\\n{}'.format(response))\n\n return failed", "def log_failure(self, request):\n self.log_file.write(self.TYPE_FAILURE + \",%f,,,%f,,\\n\" %\n (float(request.resources[0]['amount']),\n float(request.offer)))", "def submit_errors_metric(lambda_context):\n if not are_enhanced_metrics_enabled():\n return\n\n lambda_metric(\n \"{}.errors\".format(ENHANCED_METRICS_NAMESPACE_PREFIX),\n 1,\n tags=get_enhanced_metrics_tags(lambda_context),\n )", "def test_results_error_stacktrace(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError('Shopping'))\n batch_job = BatchJob(affiliate_items, updater)\n\n with_message = 0\n for result in batch_job.run():\n with_message += (result.is_error and 'Shopping' in result.details)\n\n assert with_message == 4", "def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()", "def test_failed_deliveries_logging(self):\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=0)\n self.assertEqual(email.logs.count(), 0)\n\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=1)\n self.assertEqual(email.logs.count(), 1)\n\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=2)\n self.assertEqual(email.logs.count(), 1)", "def identify_result_error(self, record):\n return [\"error\"]", "def test_unique_buckets_invalid_record(self, mock_logging):\n self.client.received_messages = [{'Body': '{\"missing-key\": 1}'}]\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.error.called)", "def manage_kafka_error(msg):\n logger.error(msg.error())", "def failure(self):\n 
self.logger.debug(\"Logging failure for %s\", self.key)\n self.failures = self.driver.failure(self.key)", "def test_unique_buckets_invalid_sqs(self, mock_logging):\n self.client.received_messages = ['wrong-format-test']\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.error.called)", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def _log_error(self, err_msg):\n if self._on_error_action == \"raise\":\n raise InvalidDatasetError(err_msg)\n else:\n logger.warning(err_msg)", "def test_sqs_log_handler_error(self):\n try:\n extra = {\n \"test\": \"test logging\",\n \"num\": 1,\n 5: \"9\",\n \"float\": 1.75,\n \"nested\": {\"more\": \"data\"}\n }\n self.logger.error(\"test info message with properties\", extra=extra)\n body = self.retrieve_message()\n expected = (\"\"\"{\"asctime\": \"2016-01-01 00:00:00,000\", \"levelname\": \"ERROR\",\"\"\"\n \"\"\" \"message\": \"test info message with properties\",\"\"\"\n \"\"\" \"5\": \"9\", \"float\": 1.75, \"num\": 1,\"\"\"\n \"\"\" \"test\": \"test logging\", \"nested\": {\"more\": \"data\"}}\"\"\")\n except BaseException as err:\n self.fail(\"Should not raise exception, got {} instead\".format(err))\n self.assertEqual(body, expected)", "def error_handler(self, failure):\n log.error(failure)", "def log_failure(self, obj, message):\n super().log_failure(obj=obj, message=message)", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "async def test_failed_samples(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"6\", entities=[])", "def fail(msg):\n log('FAIL', msg)", "def remove_record_failure():\n\t\tpass", "def on_failed(self, status_code: int, request: Request):\n self.update_rate_limit(request)\n\n data = request.response.json()\n error = data[\"error\"]\n msg = f\"请求失败,状态码:{status_code},类型:{error['name']}, 信息:{error['message']}\"\n self.gateway.write_log(msg)", "def _handle_error(self, failure, item, spider):\n # do nothing, just log\n log.err(failure)", "def error(self, msg):\r\n self.logger.error(msg)", "def catchError(custom_message = \"\"):\n exc_type, exc_value, exc_traceback = sys.exc_info() \n traceback_details = {\n 'filename': exc_traceback.tb_frame.f_code.co_filename,\n 'lineno' : exc_traceback.tb_lineno,\n 'name' : exc_traceback.tb_frame.f_code.co_name,\n 'error_type' : exc_type.__name__,\n 'logtype' : \"error\",\n 'custom_message' : custom_message,\n 'message' : str(exc_value), # or see traceback._some_str()\n 'datetime' : str(datetime.datetime.now())\n }\n del(exc_type, exc_value, exc_traceback) # So we don't leave our local labels/objects dangling\n #print(traceback_details)\n query = {\"collection_name\" : \"toolLogs\", \"data\":traceback_details}\n dbu.insertData(query)", "def failed(self, id, err=''):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n \n if index == -1:\n return None\n\n records[index][\"status\"] = \"failed\"\n if 'end-time' in records[index]:\n records[index][\"end-time\"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if 'comments' in records[index]:\n records[index][\"comments\"] += \" failed{ \" + err + \" };\"\n\n self.db.update_row(index, records[index])\n\n _log.info('Test %s 
marked as failed with message %s.' % (str(id), str(err)))\n \n return records[index]", "def test_error_logging(self):\n # Verify nothing in the journal\n assert len(Record.objects.recent('heartbeat')) == 0\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': 'foosurvey',\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n errors = json.loads(resp.content)['errors']\n assert len(errors) > 0\n\n # Verify there's one entry now.\n assert len(Record.objects.recent('heartbeat')) == 1", "def error(update, context):\n logging.warning('Update \"%s\" ', update)\n logging.exception(context.error)" ]
[ "0.65698266", "0.6314558", "0.6210403", "0.6180184", "0.61691684", "0.6075002", "0.6070457", "0.6059748", "0.6040478", "0.598686", "0.5963177", "0.5926306", "0.5920489", "0.5919631", "0.58492374", "0.5753404", "0.5748351", "0.5638719", "0.5619237", "0.56000847", "0.55767053", "0.554524", "0.55307674", "0.55232894", "0.54596126", "0.54188305", "0.54008555", "0.5392786", "0.53883153", "0.5387347" ]
0.8446626
0
Segment the records into batches that conform to SQS restrictions This will log any single record that is too large to send, and skip it.
def _message_batches(cls, records): # Dump the records to a list of minimal json records_json = [ json.dumps(record, separators=(',', ':')) for record in records ] current_batch_size = 0 current_batch = [] for record in records_json: line_len = len(record) # Check if the max size of the batch has been reached or if the current # record will exceed the max batch size and start a new batch if ((len(current_batch) == cls.MAX_BATCH_COUNT) or (current_batch_size + line_len > cls.MAX_BATCH_SIZE)): yield current_batch[:] current_batch_size = 0 del current_batch[:] if line_len > cls.MAX_BATCH_SIZE: LOGGER.error('Record too large (%d) to send to SQS:\n%s', line_len, record) cls._log_failed(1) continue # Add the record to the batch current_batch_size += line_len current_batch.append(record) # yield the result of the last batch (no need to copy via slicing) if current_batch: yield current_batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_process(self, log_req):\n rq_size = log_req.multipart_size\n with self._lock:\n if self._payload_size + rq_size >= self.max_payload_size:\n if len(self._batch) > 0:\n self._send_batch()\n self._batch.append(log_req)\n self._payload_size += rq_size\n if len(self._batch) >= self.max_entry_number:\n self._send_batch()", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def put_records_batch(\n client, stream_name: str, records: list, max_retries: int, max_batch_size: int = 500\n) -> None or List[dict]:\n\n retry_list = []\n\n for batch_index, batch in enumerate(split_list(records, max_batch_size)):\n records_to_send = create_records(batch)\n retries_left = max_retries\n\n while len(records_to_send) > 0:\n kinesis_response = client.put_records(\n Records=records_to_send, StreamName=stream_name,\n )\n\n if kinesis_response[\"FailedRecordCount\"] == 0:\n break\n else:\n index: int\n record: dict\n for index, record in enumerate(kinesis_response[\"Records\"]):\n if \"ErrorCode\" in record:\n # original records list and response record list have same order, guaranteed:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records\n logger.error(\n f\"A record failed with error: {record['ErrorCode']} {record['ErrorMessage']}\"\n )\n retry_list.append(records_to_send[index])\n\n records_to_send = retry_list\n retry_list = []\n\n if retries_left == 0:\n error_msg = (\n f\"No retries left, giving up on records: {records_to_send}\"\n )\n logger.error(error_msg)\n return records_to_send\n\n retries_left -= 1\n\n logger.info(f\"Waiting 500 ms before retrying\")\n time.sleep(0.5)\n\n return None", "def _send_messages(self, batched_messages):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: len(resp.get('Failed', [])) > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n @backoff.on_exception(backoff.expo, self.EXCEPTIONS_TO_BACKOFF,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n def _send_messages_helper(entries):\n \"\"\"Inner helper function for sending messages with backoff_handler\n\n Args:\n entries (list<dict>): List of SQS SendMessageBatchRequestEntry items\n \"\"\"\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return response\n\n message_entries = [\n {\n 'Id': str(idx),\n 'MessageBody': message\n } for idx, message in enumerate(batched_messages)\n ]\n\n # The try/except here is to catch any raised errors at the end of the backoff\n try:\n return _send_messages_helper(message_entries)\n except 
self.EXCEPTIONS_TO_BACKOFF:\n LOGGER.exception('SQS request failed')\n # Use the current length of the message_entries in case some records were\n # successful but others were not\n self._log_failed(len(message_entries))\n return", "def record_batch_size(self):\n return 10000", "def _finalize(self, response, batch):\n if not response:\n return # Could happen in the case of backoff failing enitrely\n\n # Check for failures that occurred in PutRecordBatch after several backoff attempts\n # And log the actual record from the batch\n failed = self._check_failures(response, batch=batch)\n\n # Remove the failed messages in this batch for an accurate metric\n successful_records = len(batch) - failed\n\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records)\n LOGGER.info(\n 'Successfully sent %d message(s) to queue %s',\n successful_records,\n self.queue.url\n )", "def _flush_batch(self) -> None:\n batch_len = len(self._current_batch)\n if batch_len == 0:\n self.logger.debug('Nothing to flush.')\n return\n\n self.logger.debug(f'Flushing batch size {batch_len}')\n\n with self.LOCK:\n to_process_batch = list(self._current_batch)\n self._current_batch = list()\n\n log_event = EventFactory.create_log_event(to_process_batch, self.logger)\n\n self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event)\n\n if log_event is None:\n self.logger.exception('Error dispatching event: Cannot dispatch None event.')\n return\n\n try:\n self.event_dispatcher.dispatch_event(log_event)\n except Exception as e:\n self.logger.error(f'Error dispatching event: {log_event} {e}')", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def beat_inbox_sms_bulk():\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()\n\n while list_of_sms_notifications:\n save_smss.apply_async((None, list_of_sms_notifications, receipt_id_sms), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: SMS receipt {receipt_id_sms} sent to in-flight.\")\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def process_data():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n input_file = urllib.unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key']).encode('utf-8')\n s3.download_file(input_bucket_name, input_file, input_file)\n output_file = os.path.join(output_dir, os.path.splitext(input_file)[0]+'.csv')\n parse_patient_data(input_file, output_file)\n upload_data(output_file)\n cleanup_files(input_file, output_file)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def get_records(self, limit=None):\n #OK_TO_SEND_RECORD = {'state': 'oktosend', 'volume': 'oktosend'}\n checkpoint = self.checkpoint\n where_clauses = []\n # DB uses local time -> checkpoint and all timestamps are in local time\n start_time = None\n # initialized anyway below - end_time = 
datetime.datetime.now()\n ok_to_send_time = None\n end_by_limit = False\n\n if checkpoint:\n start_time = checkpoint.date()\n retrieved_previous_end_time = checkpoint.aux()\n checkpoint_interval = checkpoint.transaction()\n if checkpoint_interval is None:\n checkpoint_interval = 0\n else:\n checkpoint_interval = int(checkpoint_interval)\n if checkpoint_interval == 0:\n # NO interval, OK to send from the beginning\n DebugPrint(4, \"Sending OK_TO_SEND - no interval in checkpoint (%s/%s)\" %\n (start_time, checkpoint_interval))\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n else:\n estimated_previous_end_time = timeutil.wind_time(start_time, seconds=checkpoint_interval,\n backward=False)\n if retrieved_previous_end_time is not None and \\\n retrieved_previous_end_time != estimated_previous_end_time:\n DebugPrint(4, \"End_time in checkpoint not matching: estimated:%s, retrieved:%s\" %\n (estimated_previous_end_time, retrieved_previous_end_time))\n ok_to_send_time = estimated_previous_end_time\n DebugPrint(4, \"Loaded checkpoint: %s (-%s), %s (%s - %s)\" %\n (start_time, self.rollback, ok_to_send_time, checkpoint_interval, retrieved_previous_end_time))\n if self.rollback > 0:\n start_time = timeutil.wind_time(start_time, seconds=self.rollback)\n where_clauses.append(\"start >= '%s'\" % timeutil.format_datetime(start_time, iso8601=False))\n else:\n # NO Checkpoint - OK to send from the beginning\n DebugPrint(4, \"Sending OK_TO_SEND - no checkpoint\")\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n\n if limit > 0:\n end_time = timeutil.wind_time(start_time, hours=limit, backward=False)\n # If input_delay is 0, check that the end_time is not in the future\n delay_time = timeutil.wind_time(datetime.datetime.now(), seconds=self.input_delay)\n if end_time > delay_time:\n end_time = delay_time\n else:\n end_by_limit = True\n #end_time = min(end_time, timeutil.wind_time(datetime.datetime.now(), seconds=self.input_delay))\n else:\n end_time = timeutil.wind_time(datetime.datetime.now(), seconds=self.input_delay)\n if checkpoint or limit or self.input_delay>0:\n end_time = timeutil.at_minute(end_time)\n where_clauses.append(\"start < '%s'\" % timeutil.format_datetime(end_time, iso8601=False))\n if ok_to_send_time is not None and ok_to_send_time >= timeutil.wind_time(end_time, seconds=60):\n # If ok_to_send_time is not None, then checkpoint_interval was assigned (in checkpoint block)\n DebugPrint(2, \"End time comes before new records are encountered (%s - %s <= 60sec)\" %\n (end_time, ok_to_send_time))\n if end_by_limit:\n DebugPrint(2, \"To avoid misinterpreting records DataLengthMax in the config file must be > %s\" %\n (checkpoint_interval/3600))\n else:\n DebugPrint(2, \"Either the probe runs too frequently or DataLengthMax may be too short. 
\"\n \"Current interval (hours):\" %\n (checkpoint_interval/3600))\n if start_time is not None:\n if self.input_min_interval > 0:\n if start_time > timeutil.wind_time(end_time, seconds=self.input_min_interval):\n return\n else:\n if start_time >= end_time:\n return\n if where_clauses:\n where_sql = \"WHERE %s\" % \" AND \".join(where_clauses)\n else:\n where_sql = \"\"\n\n sql = '''SELECT\n node,\n volume,\n type,\n logname,\n start,\n finish,\n state,\n storage_group,\n reads,\n writes\n FROM tape_mounts\n %s\n ORDER BY start, storage_group\n ''' % (where_sql, )\n\n DebugPrint(4, \"Requesting new EnstoreTapeDrive records %s\" % sql)\n last_record_start_time = None\n first_record_start_time = None\n first_record = None\n mount_checkpoint = {}\n for r in self.query(sql):\n # Filter out values that are not acceptable\n if r['storage_group'] is None:\n continue\n if r['state'] not in ('M', 'D'):\n continue\n if ok_to_send_time is not None:\n # if ok_to_send_time is not None, checkpoint is True\n if r['start'] >= ok_to_send_time:\n # send also the current record (yield is after)\n # Time intervals are closed on the left (start) and open on the right (end)\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n # to send the record OK_TO_SEND_RECORD only once\n ok_to_send_time = None\n yield r\n if checkpoint:\n state = r['state']\n last_record_start_time = r['start'] # using start because finish could be NULL\n if first_record is None:\n first_record = r\n first_record_start_time = last_record_start_time\n if state == 'M':\n mount_checkpoint[r['volume']] = last_record_start_time\n elif state == 'D':\n mount_checkpoint[r['volume']] = None\n\n if last_record_start_time is not None and end_time is not None:\n # Looking 6mo before 4.27.2015 there are an average of 167 M od R records per hour\n if timeutil.wind_time(end_time, minutes=10) > last_record_start_time:\n DebugPrint(3, \"Warning, no records in the last 10 min of EnstoreTapeDrive probe (%s - %s)\" %\n (end_time, last_record_start_time))\n\n if checkpoint:\n # first_unresolved_mount if any should be before end_time\n first_unresolved_mount = end_time\n for i in mount_checkpoint.values():\n if i is not None and i < first_unresolved_mount:\n # If there are records (exist a i not None), first record variables are guaranteed not None\n if i > first_record_start_time or not end_by_limit:\n # it is past the first record or the time span is shorter than DataLengthMax\n # this guarantees that the new invocation will may have more records\n first_unresolved_mount = i\n else:\n # skip i for checkpoint consideration\n DebugPrint(2,\n \"Warning, reached DataLengthMax while the first mount record is still not matched.\\n\"\n \"Sending oktosend, a fake dismount record and advancing the checkpoint past it\")\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n yield self.get_dismount_mount_record(first_record, end_time)\n\n if first_unresolved_mount == end_time:\n checkpoint_interval = 0\n else:\n checkpoint_interval = timeutil.total_seconds(end_time-first_unresolved_mount)\n DebugPrint(4, \"Saving new EnstoreTapeDrive checkpoint: %s - %s (%s)\" %\n (first_unresolved_mount, end_time, checkpoint_interval))\n checkpoint.set_date_transaction_aux(first_unresolved_mount, checkpoint_interval, end_time)", "def test_block_bad_batch(self):\n pass", "def _buff_split(self, upload_buffer):\n if upload_buffer.intent_count() == 0:\n return\n tail_buffer = upload_buffer\n while True:\n if tail_buffer.length < self.recommended_upload_part_size + self.min_part_size:\n # 
`EmergePlanner_buff_partition` can split in such way that tail part\n # can be smaller than `min_part_size` - to avoid unnecessary download of possible\n # incoming copy intent, we don't split further\n yield tail_buffer\n return\n head_buff, tail_buffer = self._buff_partition(tail_buffer)\n yield head_buff", "def yield_chunked_events(self, events):\n for i in range(0, len(events), 5000):\n yield events[i:i + 5000]", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def _strip_successful_records(cls, messages, response):\n success_ids = {\n item['Id'] for item in response['Successful']\n }\n\n LOGGER.info('Removing sucessful message indices from batch: %s', success_ids)\n\n for success_id in success_ids:\n # Get the successful message by ID and remove it\n message = cls._extract_message_by_id(messages, success_id)\n if not message:\n continue\n messages.remove(message)", "def process( self, message ) :\n try:\n spot_request_msg = SpotRequestMsg( raw_json=message.get_body() )\n spot_request_uuid = spot_request_msg.spot_request_uuid\n spot_master_uuid = spot_request_msg.spot_master_uuid\n logger.info( fmt_request_uuid_msg_hdr( spot_request_uuid ) + 'process() for spot_master_uuid: ' + spot_master_uuid )\n spot_request_item = get_spot_request_item( self.spot_request_table_name, spot_request_msg.spot_request_uuid, region_name=self.region_name, profile_name=self.profile_name )\n ts_cmd_complete = spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_BATCH_PROCESS_COMPLETE_TIMESTAMP]\n cmd_exception_message = spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_INSTANCE_BATCH_PROCESS_START_EXCEPTION_MESSAGE]\n cmd_exception_traceback = spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_INSTANCE_BATCH_PROCESS_START_EXCEPTION_TRACEBACK]\n key_value_pairs = {\n TableSpotRequest.is_open:0,\n TableSpotRequest.spot_request_state_code:SpotRequestStateCode.instance_complete_exception,\n TableSpotRequest.ts_cmd_complete:ts_cmd_complete,\n TableSpotRequest.cmd_exception_message:cmd_exception_message,\n TableSpotRequest.cmd_exception_traceback:cmd_exception_traceback,\n }\n spot_request_row_partial_save( self.spot_request_table_name, spot_request_item, key_value_pairs, region_name=self.region_name, profile_name=self.profile_name )\n self.spot_request_sqs_message_durable.delete_message(message) \n\n except StandardError as e:\n logger.error( fmt_request_uuid_msg_hdr( spot_request_uuid ) + 'Exiting SpotRequestDispatcher due to exception' )\n logger.error( fmt_request_uuid_msg_hdr( spot_request_uuid ) + str(e) )\n logger.error( fmt_request_uuid_msg_hdr( spot_request_uuid ) + traceback.format_exc() )", "def push_bq_records(client, dataset, table, records, sleep = 300, max_batch = 100, print_failed_records = True, retry_on_fail = True):\n if len(records) == 0:\n return\n if len(records) > max_batch:\n split = len(records) // 2\n push_bq_records(client, dataset, table, records[0:split], sleep, max_batch)\n push_bq_records(client, dataset, table, records[split:], sleep, max_batch)\n else:\n try:\n succ = client.push_rows(dataset, table, records)\n if not succ:\n if retry_on_fail:\n print(\"Push to BigQuery table was unsuccessful. 
Waiting %s seconds and trying one more time.\" % sleep)\n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch, print_failed_records, False)\n else:\n if print_failed_records:\n print(\"\\nRecord 0:\")\n print(records[0])\n if len(records) > 1:\n print(\"\\nRecord %s:\" % (len(records) - 1))\n print(records[len(records)-1])\n raise RuntimeError('Push to BigQuery table was unsuccessful. See above for sample record(s) if requested.')\n except BrokenPipeError:\n print(\"BrokenPipeError while pushing %s records. Waiting %s seconds and trying again.\" % (len(records), sleep)) \n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch)", "def split_records(self, data):\n byte_array = bytearray(data)\n size = len(byte_array)\n split_data = [bytearray()]\n for index, byte in enumerate(byte_array):\n if index != size-1 and byte == 143 and byte_array[index+1] == 142:\n print(\"found delimeter byte 143,142 b'8f8e'\")\n split_data[-1].append(byte)\n split_data.append(bytearray())\n print(\"start new record\")\n else:\n split_data[-1].append(byte)\n return split_data", "def getBatchSize(self, context, obj):\n return 100", "def static_batch(data, batch_size=16):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= batch_size:\n yield buf\n buf = []\n if len(buf) > 0:\n yield buf", "def test_block_missing_batch(self):\n pass", "def _send_batch(self, base_url, endpoint, batch, dataset_id=None, dataset_version=None, retries=0):\n try:\n params = {'data': base64.b64encode(json.dumps(batch).encode()).decode()}\n if dataset_id:\n params['dataset_id'] = dataset_id\n params['token'] = self.token\n if dataset_version:\n params['dataset_version'] = dataset_version\n response = self.request(base_url, [endpoint], params, 'POST')\n msg = \"Sent \" + str(len(batch)) + \" items on \" + time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"!\"\n Mixpanel.LOGGER.debug(msg)\n return response\n except BaseException as be:\n Mixpanel.LOGGER.debug('Exception in _send_batch')\n Mixpanel.LOGGER.debug(be)\n Mixpanel.LOGGER.warning(\"Failed to import batch, dumping to file import_backup.txt\")\n with open('import_backup.txt', 'a+') as backup:\n json.dump(batch, backup)\n backup.write('\\n')", "def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths", "def test_batch_size(self):\n\n class A(Document):\n s = StringField()\n\n A.drop_collection()\n\n for i in range(100):\n A.objects.create(s=str(i))\n\n # test iterating over the 
result set\n cnt = 0\n for _ in A.objects.batch_size(10):\n cnt += 1\n assert cnt == 100\n\n # test chaining\n qs = A.objects.all()\n qs = qs.limit(10).batch_size(20).skip(91)\n cnt = 0\n for _ in qs:\n cnt += 1\n assert cnt == 9\n\n # test invalid batch size\n qs = A.objects.batch_size(-1)\n with pytest.raises(ValueError):\n list(qs)", "def midbatch_hook(self, progress, logging_epoch):\n pass", "def _on_too_many_orders(self, msg):\r\n self.debug(\"### Server said: '%s\" % msg[\"message\"])\r\n self.count_submitted -= 1\r\n self.signal_order_too_fast(self, msg)", "def wifi_scanner_batch_scan_full(self, scan_setting):\n self.dut.ed.clear_all_events()\n data = wutils.start_wifi_background_scan(self.dut, scan_setting)\n idx = data[\"Index\"]\n scan_rt = data[\"ScanElapsedRealtime\"]\n self.log.info(\"Wifi batch shot scan started with index: %s\", idx)\n #generating event wait time from scan setting plus leeway\n scan_time, scan_channels = wutils.get_scan_time_and_channels(\n self.wifi_chs, scan_setting, self.stime_channel)\n # multiply scan period by two to account for scheduler changing period\n scan_time += scan_setting[\n 'periodInMs'] * 2 #add scan period delay for next cycle\n wait_time = scan_time / 1000 + self.leeway\n validity = False\n try:\n for snumber in range(1, 3):\n results = []\n event_name = \"%s%sonResults\" % (EVENT_TAG, idx)\n self.log.debug(\"Waiting for event: %s for time %s\", event_name,\n wait_time)\n event = self.dut.ed.pop_event(event_name, wait_time)\n self.log.debug(\"Event received: %s\", event)\n bssids, validity = self.proces_and_valid_batch_scan_result(\n event[\"data\"][\"Results\"], scan_rt, event[\"data\"][KEY_RET],\n scan_setting)\n event_name = \"%s%sonFullResult\" % (EVENT_TAG, idx)\n results = self.pop_scan_result_events(event_name)\n asserts.assert_true(\n len(results) >= bssids,\n \"Full single shot result don't match %s\" % len(results))\n asserts.assert_true(bssids > 0, EMPTY_RESULT)\n asserts.assert_true(validity, INVALID_RESULT)\n except queue.Empty as error:\n raise AssertionError(\"Event did not triggered for batch scan %s\" %\n error)\n finally:\n self.dut.droid.wifiScannerStopBackgroundScan(idx)\n self.dut.ed.clear_all_events()", "def _get_message_groups(\n self, messages: Iterator[AirbyteMessage], schema_inferrer: SchemaInferrer, limit: int\n ) -> Iterable[Union[StreamReadPages, AirbyteLogMessage]]:\n records_count = 0\n at_least_one_page_in_group = False\n current_page_records = []\n current_slice_pages = []\n current_page_request: Optional[HttpRequest] = None\n current_page_response: Optional[HttpResponse] = None\n\n while records_count < limit and (message := next(messages, None)):\n if self._need_to_close_page(at_least_one_page_in_group, message):\n self._close_page(current_page_request, current_page_response, current_slice_pages, current_page_records)\n current_page_request = None\n current_page_response = None\n\n if at_least_one_page_in_group and message.type == Type.LOG and message.log.message.startswith(\"slice:\"):\n yield StreamReadSlices(pages=current_slice_pages)\n current_slice_pages = []\n at_least_one_page_in_group = False\n elif message.type == Type.LOG and message.log.message.startswith(\"request:\"):\n if not at_least_one_page_in_group:\n at_least_one_page_in_group = True\n current_page_request = self._create_request_from_log_message(message.log)\n elif message.type == Type.LOG and message.log.message.startswith(\"response:\"):\n current_page_response = self._create_response_from_log_message(message.log)\n elif message.type 
== Type.LOG:\n yield message.log\n elif message.type == Type.RECORD:\n current_page_records.append(message.record.data)\n records_count += 1\n schema_inferrer.accumulate(message.record)\n else:\n self._close_page(current_page_request, current_page_response, current_slice_pages, current_page_records)\n yield StreamReadSlices(pages=current_slice_pages)" ]
[ "0.61672634", "0.61136776", "0.55916774", "0.55586743", "0.55274945", "0.5349872", "0.5317542", "0.5314152", "0.51939815", "0.519289", "0.5187157", "0.51492345", "0.5112248", "0.50969017", "0.5063732", "0.5059225", "0.5043429", "0.5042894", "0.5032821", "0.49991766", "0.49933842", "0.49830627", "0.49705377", "0.4968449", "0.49657884", "0.49600086", "0.49515098", "0.49494943", "0.493872", "0.4938432" ]
0.62689286
0
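Several of the negatives above (for example `_log_process`, `_message_batches`, and `static_batch`) build outgoing batches by capping both the entry count and the accumulated payload size before flushing. The following is a minimal, generic sketch of that pattern, not code taken from this dataset; the 10-entry and 256 KiB defaults are assumptions that mirror common SQS batch limits.

from typing import Iterable, Iterator, List

def batch_by_count_and_size(
    items: Iterable[str],
    max_count: int = 10,          # assumed cap, mirrors the SQS 10-entry batch limit
    max_bytes: int = 256 * 1024,  # assumed cap, mirrors the SQS 256 KiB payload limit
) -> Iterator[List[str]]:
    """Yield batches that stay under both an entry-count cap and a byte-size cap."""
    batch: List[str] = []
    batch_bytes = 0
    for item in items:
        item_bytes = len(item.encode("utf-8"))
        # Flush the current batch before adding an item that would break either limit.
        if batch and (len(batch) >= max_count or batch_bytes + item_bytes > max_bytes):
            yield batch
            batch, batch_bytes = [], 0
        batch.append(item)
        batch_bytes += item_bytes
    if batch:
        yield batch  # flush the final partial batch

For example, list(batch_by_count_and_size(["x"] * 25)) yields batches of 10, 10, and 5 items; an item larger than max_bytes is still emitted alone in its own batch.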
Inspect the response and remove any records that have been successfully sent. For each record, the index of the response element is the same as the index used in the request array.
def _strip_successful_records(cls, messages, response): success_ids = { item['Id'] for item in response['Successful'] } LOGGER.info('Removing sucessful message indices from batch: %s', success_ids) for success_id in success_ids: # Get the successful message by ID and remove it message = cls._extract_message_by_id(messages, success_id) if not message: continue messages.remove(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_none_response(self):\n\n print(\"# Rows before non response are removed: {} \".format(len(self.data)))\n self.data = self.data[self.data['names'].map(lambda d: len(d) > 0)]\n print(\"# Rows after non response are removed: {} \".format(len(self.data)))", "def _finalize(self, response, batch):\n if not response:\n return # Could happen in the case of backoff failing enitrely\n\n # Check for failures that occurred in PutRecordBatch after several backoff attempts\n # And log the actual record from the batch\n failed = self._check_failures(response, batch=batch)\n\n # Remove the failed messages in this batch for an accurate metric\n successful_records = len(batch) - failed\n\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records)\n LOGGER.info(\n 'Successfully sent %d message(s) to queue %s',\n successful_records,\n self.queue.url\n )", "def cleanupRequests(n=10):\n\n # formula for filtering data from airtable\n formula = 'AND(DATETIME_DIFF(NOW(), {Last Modified}, \"days\") > 30, Status = \"Request Complete\")'\n\n # airtable query\n headers = {\"Authorization\": \"Bearer {}\".format(os.environ['AIRTABLE_AUTH_TOKEN'])}\n params = params = {\n 'maxRecords': 10,\n 'view': 'All Requests + Data',\n 'sortField':'Last Modified',\n 'sortDirection': 'asc',\n 'filterByFormula': formula\n\n }\n\n\n r = requests.get(os.environ['PROD_URL'], headers=headers, params=params)\n\n # if status code is good ...\n if r.status_code == 200:\n\n # instantiate twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n # iterate through records\n for record in r.json()['records']:\n\n data = {\n 'fields':\n {'Message': \"\",\n 'First Name': \"\"\n }\n }\n\n # patch the requisite fields\n r = requests.patch(\n os.environ['PROD_URL'] + record['id'] , headers=headers, json=data\n )\n\n # erase the recordings associated with the call SID\n call_sid = record['fields']['Twilio Call Sid']\n call = client.calls(call_sid).fetch()\n\n for recording_sid in call.recordings.list():\n client.recordings(recording_sid).delete()\n\n # confirm deletion\n successfully_deleted = 0\n r = requests.get(os.environ['PROD_URL'] + record['id'], headers=headers)\n call = client.calls(call_sid).fetch()\n\n if all([r.status_code == 200, \n 'Message' not in r.json().keys(), \n 'First Name' not in r.json().keys(),\n len(call.recordings.list()) == 0]):\n print('succesfully deleted')\n successfully_deleted += 1\n \n else:\n print('error')\n\n return str(successfully_deleted)", "def remove_record_failure():\n\t\tpass", "def sanitize_reply_buffer(self): \n for i in self.async_reply_buffer:\n\n if not i.endswith('\\n'):\n \n i = self.async_reply_buffer.index(i)\n temp = self.async_reply_buffer\n #with suppress(IndexError):\n if i+1 == len(temp):\n return 'SANFAIL'\n if i < len(temp):\n #print(i)\n #print(len(temp))\n #print(temp)\n #print(temp[i])\n #print(temp[i+1])\n temp[i] = temp[i] + temp[i+1]\n temp.pop(i+1)\n self.async_reply_buffer = temp\n\n\n #print(self.async_reply_buffer)", "def test_handle_response_remove_request_from_pending(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n 
response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotIn(uuid, lookup.pending_requests.keys())", "def _flush(self):\n buffer_len = len(self._buffer)\n\n if buffer_len == 0:\n _log.info('No pending records to index; URI: %s; index: %s',\n self._uri, self._index)\n return\n\n _log.info('Indexing %d records; URI: %s; index: %s ...',\n buffer_len, self._uri, self._index)\n\n headers = {'Authorization': 'Splunk ' + self._token}\n\n try:\n response = self._session.post(self._uri,\n headers=headers,\n data=json.dumps(self._buffer),\n verify=self._ca_cert)\n\n log_data = ('URI: {}; index: {}; response status: {}; '\n 'response content: {}'\n .format(self._uri, self._index,\n response.status_code, response.text))\n\n if response.status_code != 200:\n _log.error('Failed to index %d records; HTTP status '\n 'code indicates error; %s',\n buffer_len, log_data)\n return\n\n try:\n j = response.json()\n except Exception as e:\n _log.error('Failed to get JSON from response; %s; '\n 'error: %s; %s', log_data, type(e).__name__, e)\n return\n\n if j['code'] != 0:\n _log.error('Failed to index %d records; Splunk status '\n 'code in JSON indicates error; %s',\n buffer_len, log_data)\n return\n\n _log.info('Indexed %d records; %s', buffer_len, log_data)\n del self._buffer[:]\n\n except requests.ConnectionError as e:\n _log.error('Failed to index %d records; connection error; '\n 'URI: %s; index: %s; error: %s: %s; ',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)\n\n except Exception as e:\n _log.error('Failed to index %d records; unexpected error; '\n 'URI: %s; index: %s; error: %s: %s',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)", "def __send_responses(self):\n # create a copy of the responses\n responses = self.__responses\n # for every response\n for response in responses:\n # send the response\n self.__send(response)\n # remove the response from the responses' list\n if response in self.__responses:\n self.__responses.remove(response)", "def _check_failures(self, response, batch=None):\n if not response.get('Failed'):\n return 0 # nothing to do here\n\n LOGGER.error('The following records failed to put to queue %s', self.queue.url)\n\n for failure in response['Failed']:\n # Pull out the record that matches this ID\n record = self._extract_message_by_id(batch, failure['Id']) if batch else None\n LOGGER.error(self._format_failure_message(failure, record=record))\n\n failed = len(response.get('Failed', []))\n self._log_failed(failed)\n\n # Raise an exception if this is the fault of the sender (us)\n if any(result['SenderFault'] for result in response['Failed']):\n raise SQSClientError('Failed to send records to SQS:\\n{}'.format(response))\n\n return failed", "def clearList(self):\n\n if not RequestsDAO().getRequests():\n return jsonify(Error=\"No requests found\"), 404\n else:\n\n RequestsDAO().truncateTurnTable()\n return jsonify(TURN=\"Table content was deleted\"), 200", "def _recordsToResponse(self, records):\n fieldsList = []\n count = 0\n if records:\n size = 0\n while size < self._maxSize:\n try:\n record = records.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n pickled = pickle.dumps(self.recordToDict(record))\n size = size + len(pickled)\n fieldsList.append(pickled)\n count += 1\n\n response = {\"items\": fieldsList}\n\n if records:\n response[\"continuation\"] = self._storeContinuation(records, \"records\")\n\n 
return response", "def drop_matching_records(self, check):\n matches = self._match(check)\n for rec in matches:\n self._drop_bytes(rec)\n del self._records[rec['msg_id']]", "def _on_tracking_failure(self, response, data):\n try:\n response = json.loads(response)\n except:\n # the response should be in JSON, but in case it can't be parsed just try another attempt\n logging.debug(\"cannot parse tracker response, should be valid JSON\")\n return response\n\n # remove the successfully tracked hits from payload\n tracked = response['tracked']\n data['requests'] = data['requests'][tracked:]\n\n return response['message']", "def test_response(self):\n for i, response in enumerate(RESPONSES):\n with self.subTest(i=i):\n self.assertDictContainsSubset(response, dict(self.responses[i].data))", "def reset(self):\n # Remove all successful action records\n to_remove = []\n for action_record, (p_valid, result_text) in self.action_records.items():\n if p_valid > .5:\n to_remove.append(action_record)\n for action_record in to_remove:\n del self.action_records[action_record]", "def response( self, request, error_code, data ):\n array = []\n if request == b'CAUTH' and data != self.__null_byte:\n # process differently\n data_array = self.ds_document.break_data(data)\n # print('after data is broken: {}'.format(data_array))\n for item in data_array: # for all the items we have to generate a different timestamp and checkum\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, item)\n array.append([request, checksum, timestamp, error_code, item])\n # print(array)\n # print(array)\n return array\n\n else: # if we are sending a generic response, then\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, data)\n\n array = [request, checksum, timestamp, error_code, data]\n return array", "def test_ten_results_returned(delete_previous_db_record):\n request = create_client().gateway.getResults(\n search=\"some string\").response()\n\n # Assert sucessful request\n assert_that(request.result.status, equal_to('200'))\n\n \"\"\"\n I'm assuming the json object uses a list to contain\n the results\n \"\"\"\n assert_that(len(request.result.results, equal_to(10)))", "def test_remove_expired(self):\n req1 = FakeRequest(1, True)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, True)\n req5 = FakeRequest(5, False)\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.remove_expired()\n\n self.assertTrue(\n req2 in self.request_buffer.requests and\n req5 in self.request_buffer.requests\n )", "def consume_data(self, data):\n # Get parameters\n logger_manager = data['logger_manager']\n doc_m = data['document_manager']\n message_id = data['message_id']\n documents = data['documents']\n to_remove_queue = data['to_remove_queue']\n duplicates = no_requestInTs = 0\n hash_set = set()\n\n for current_document in documents:\n\n # Mark to removal documents without requestInTs immediately (as of bug in xRoad software ver 6.22.0)\n if current_document['requestInTs'] is None and current_document['securityServerType'] is None:\n to_remove_queue.put(current_document['_id'])\n no_requestInTs += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('no_requestInTs',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n 
continue\n\n # Check if is batch duplicated\n current_document_hash = doc_m.calculate_hash(current_document)\n if current_document_hash in hash_set:\n # If yes, mark to removal\n to_remove_queue.put(current_document['_id'])\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('batch_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is database duplicated\n if self.db_m.check_if_hash_exists(current_document_hash):\n # If here, add to batch duplicate cache\n hash_set.add(current_document_hash)\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('database_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Mark hash as seen\n hash_set.add(current_document_hash)\n # Find possible matching documents\n matching_documents = self.db_m.find_by_message_id(current_document)\n # Try to match the current document with possible pairs (regular)\n merged_document = doc_m.find_match(current_document, matching_documents)\n matching_type = ''\n\n if merged_document is None:\n # Try to match the current document with orphan-matching\n merged_document = doc_m.find_orphan_match(current_document, matching_documents)\n if merged_document is not None:\n matching_type = 'orphan_pair'\n else:\n matching_type = 'regular_pair'\n\n if merged_document is None:\n matching_type = 'orphan'\n if current_document['securityServerType'] == 'Producer':\n new_document = doc_m.create_json(None, current_document, None, current_document_hash, message_id)\n else:\n if current_document['securityServerType'] != 'Client':\n current_document['securityServerType'] = 'Client'\n new_document = doc_m.create_json(current_document, None, current_document_hash, None, message_id)\n\n new_document = doc_m.apply_calculations(new_document)\n new_document['correctorTime'] = database_manager.get_timestamp()\n new_document['correctorStatus'] = 'processing'\n new_document['matchingType'] = matching_type\n\n # Mark non-xRoad queries as 'done' instantly. 
No reason to wait matching pair\n if 'client' in new_document and new_document['client'] is not None and 'clientXRoadInstance' in new_document['client'] \\\n and new_document['client']['clientXRoadInstance'] is None:\n new_document['correctorStatus'] = 'done'\n new_document['matchingType'] = 'orphan'\n\n self.db_m.add_to_clean_data(new_document)\n\n else:\n\n if current_document['securityServerType'] == 'Client':\n\n if merged_document['client'] is None:\n merged_document['client'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['clientHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching clients for 1 producer: {1}'.format(self.worker_name, current_document)\n logger_manager.log_warning('corrector_merging', msg)\n\n else:\n\n if merged_document['producer'] is None:\n merged_document['producer'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['producerHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching producers for 1 client: {1}'.format(self.worker_name, current_document)\n logger_manager.log_error('corrector_merging', msg)\n\n self.db_m.mark_as_corrected(current_document)\n\n if no_requestInTs:\n msg = '[{0}] {1} document(s) without requestInTs present'.format(self.worker_name, no_requestInTs)\n logger_manager.log_warning('corrector_no_requestInTs', msg)\n\n return duplicates", "def curent_sesion_cleanup(self):\r\n\r\n for key,value in self.curent_sesion.items():\r\n for idx in value:\r\n requests.delete(key + str(idx), headers=self.headers)\r\n for check in requests.get(key,headers=self.headers).json()['results']:\r\n if idx in check.values():\r\n return False\r\n self.curent_sesion[key].clear()\r\n return True", "def forget(self, request):\n return []", "def forget(self, request):\n return []", "def check_for_requests(self):\n while True:\n doc = self.cc.requests_coll.find_one_and_delete(\n {'receiver': 'validator'}, sort=[('_id', pymongo.ASCENDING)]\n )\n if doc is None:\n break\n\n if doc['action'] == 'validate_upload':\n print(\"fulfil request: set valid: {} for upload_id {}\".format(doc['valid'], doc['upload_id']))\n self.validate_upload(ObjectId(doc['upload_id']), doc['valid'])", "def remove_deleted_dos_records():\n count = 0\n dos = DirectlyObservedSprayingForm.objects.last()\n formid = dos.data.get(\"_xform_id\") if dos else DIRECTLY_OBSERVED_FORM_ID\n if formid:\n data = fetch_form_data(formid, dataids_only=True)\n if not data:\n return count\n\n pks = [i[\"_id\"] for i in data]\n deleted_submissions = DirectlyObservedSprayingForm.objects.exclude(\n submission_id__in=pks\n )\n count = deleted_submissions.count()\n deleted_submissions.delete()\n\n return count", "def DeleteResponseHeader(self, name):\n assert name.islower()\n self._wpr_response.original_headers = \\\n [x for x in self._wpr_response.original_headers if x[0].lower() != name]", "def process_response(request, response):\n # A higher middleware layer may return a 
request which does not contain\n # messages storage, so make no assumption that it will be there.\n if hasattr(request, '_events'):\n # noinspection PyProtectedMember\n unstored_events = request._events.update(response)\n if unstored_events and settings.DEBUG:\n raise ValueError('Not all temporary events could be stored.')\n return response", "def simulate_response(self, documents):", "def end(response):\n if isinstance(response.response, ClosingIterator):\n return response\n\n diff = time.time() - request.start\n del request.start\n\n if response.response:\n response.response[0] = response.response[0].replace('__EXECUTION_TIME__', '{:.3}'.format(diff))\n response.headers[\"content-length\"] = len(response.response[0])\n\n return response", "def delete_session_records(self):\n self._session_records.reverse()\n self.builtin.log(\"Deleting {} records\".format(len(self._session_records)))\n for record in self._session_records[:]:\n self.builtin.log(\" Deleting {type} {id}\".format(**record))\n try:\n self.salesforce_delete(record[\"type\"], record[\"id\"])\n except SalesforceResourceNotFound:\n self.builtin.log(\" {type} {id} is already deleted\".format(**record))\n except Exception as e:\n self.builtin.log(\n \" {type} {id} could not be deleted:\".format(**record),\n level=\"WARN\",\n )\n self.builtin.log(\" {}\".format(e), level=\"WARN\")", "def process_incoming_response(self, response):\n # Validate the response.\n if not {\"__id\", \"__data\", \"__error\"}.issubset(iterkeys(response)):\n self.disconnect(\"Bad response received\")\n logger.warning(\"Response is missing some fields, ignoring.\")\n return\n\n # Determine the ID.\n id_ = response[\"__id\"]\n\n if id_ not in self.pending_outgoing_requests:\n logger.warning(\"No pending request with id %s found.\", id_)\n return\n\n request = self.pending_outgoing_requests.pop(id_)\n result = self.pending_outgoing_requests_results.pop(id_)\n error = response[\"__error\"]\n\n if error is not None:\n err_msg = \"%s signaled RPC for method %s was unsuccessful: %s.\" % (\n self.remote_service_coord, request[\"__method\"], error)\n logger.error(err_msg)\n result.set_exception(RPCError(error))\n else:\n result.set(response[\"__data\"])" ]
[ "0.6261296", "0.5926714", "0.5847716", "0.58347917", "0.5816505", "0.568727", "0.5628807", "0.55821556", "0.55515593", "0.5497828", "0.5445062", "0.5384468", "0.5301025", "0.52775407", "0.5272712", "0.52238524", "0.5213868", "0.51214164", "0.51052916", "0.50793844", "0.5073877", "0.5073877", "0.5070564", "0.50572306", "0.5054251", "0.50480604", "0.5043665", "0.5029975", "0.50294036", "0.5027412" ]
0.7417818
0
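The `_strip_successful_records` document above prunes a batch against the `Successful` entries of an SQS send_message_batch-style response so that only failed entries are retried. A minimal standalone sketch of the same idea follows; it returns a new list instead of mutating in place, and the entry and response shapes are assumed to match the ones used in the code above.

from typing import Dict, List

def strip_successful_records(messages: List[Dict], response: Dict) -> List[Dict]:
    """Keep only the batch entries whose Id is absent from the response's Successful list."""
    success_ids = {item["Id"] for item in response.get("Successful", [])}
    return [msg for msg in messages if msg["Id"] not in success_ids]

# Only the failed entry (Id "1") survives for a retry pass.
entries = [{"Id": "0", "MessageBody": "a"}, {"Id": "1", "MessageBody": "b"}]
response = {"Successful": [{"Id": "0", "MessageId": "m-0"}], "Failed": [{"Id": "1"}]}
assert strip_successful_records(entries, response) == [{"Id": "1", "MessageBody": "b"}]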
Send a list of records to SQS, batching as necessary
def send(self, payloads): records = self._payload_messages(payloads) # SQS only supports up to 10 messages so do the send in batches for message_batch in self._message_batches(records): response = self._send_messages(message_batch) self._finalize(response, message_batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch", "def put_records_batch(\n client, stream_name: str, records: list, max_retries: int, max_batch_size: int = 500\n) -> None or List[dict]:\n\n retry_list = []\n\n for batch_index, batch in enumerate(split_list(records, max_batch_size)):\n records_to_send = create_records(batch)\n retries_left = max_retries\n\n while len(records_to_send) > 0:\n kinesis_response = client.put_records(\n Records=records_to_send, StreamName=stream_name,\n )\n\n if kinesis_response[\"FailedRecordCount\"] == 0:\n break\n else:\n index: int\n record: dict\n for index, record in enumerate(kinesis_response[\"Records\"]):\n if \"ErrorCode\" in record:\n # original records list and response record list have same order, guaranteed:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records\n logger.error(\n f\"A record failed with error: {record['ErrorCode']} {record['ErrorMessage']}\"\n )\n retry_list.append(records_to_send[index])\n\n records_to_send = retry_list\n retry_list = []\n\n if retries_left == 0:\n error_msg = (\n f\"No retries left, giving up on records: {records_to_send}\"\n )\n logger.error(error_msg)\n return records_to_send\n\n retries_left -= 1\n\n logger.info(f\"Waiting 500 ms before retrying\")\n time.sleep(0.5)\n\n return None", "def _send_messages(self, batched_messages):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: len(resp.get('Failed', [])) > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n @backoff.on_exception(backoff.expo, self.EXCEPTIONS_TO_BACKOFF,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n def _send_messages_helper(entries):\n \"\"\"Inner helper function for sending messages with backoff_handler\n\n Args:\n entries (list<dict>): List of SQS SendMessageBatchRequestEntry items\n \"\"\"\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return 
response\n\n message_entries = [\n {\n 'Id': str(idx),\n 'MessageBody': message\n } for idx, message in enumerate(batched_messages)\n ]\n\n # The try/except here is to catch any raised errors at the end of the backoff\n try:\n return _send_messages_helper(message_entries)\n except self.EXCEPTIONS_TO_BACKOFF:\n LOGGER.exception('SQS request failed')\n # Use the current length of the message_entries in case some records were\n # successful but others were not\n self._log_failed(len(message_entries))\n return", "def send_messages_to_ks(records: List[str], stream_name: str):\n log.info('Sending message to Kinesis Stream')\n client = boto3.client('kinesis')\n return client.put_records(\n Records=[\n {\n 'Data': record + '\\n',\n 'PartitionKey': '1'\n } for record in records],\n StreamName=stream_name\n )", "def batch_push(self, payloads):\n body = json.dumps(payloads)\n\n status, response = self._request('POST', body, BATCH_PUSH_URL,\n 'application/json')\n if not status == 200:\n raise AirshipFailure(status, response)", "def push_bq_records(client, dataset, table, records, sleep = 300, max_batch = 100, print_failed_records = True, retry_on_fail = True):\n if len(records) == 0:\n return\n if len(records) > max_batch:\n split = len(records) // 2\n push_bq_records(client, dataset, table, records[0:split], sleep, max_batch)\n push_bq_records(client, dataset, table, records[split:], sleep, max_batch)\n else:\n try:\n succ = client.push_rows(dataset, table, records)\n if not succ:\n if retry_on_fail:\n print(\"Push to BigQuery table was unsuccessful. Waiting %s seconds and trying one more time.\" % sleep)\n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch, print_failed_records, False)\n else:\n if print_failed_records:\n print(\"\\nRecord 0:\")\n print(records[0])\n if len(records) > 1:\n print(\"\\nRecord %s:\" % (len(records) - 1))\n print(records[len(records)-1])\n raise RuntimeError('Push to BigQuery table was unsuccessful. See above for sample record(s) if requested.')\n except BrokenPipeError:\n print(\"BrokenPipeError while pushing %s records. 
Waiting %s seconds and trying again.\" % (len(records), sleep)) \n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch)", "def _send_batch(self, service_checks: list):\n for service_check in service_checks:\n self._send(service_check)", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def send_to_all(apigatewaymanagementapi, connection_ids, data):\n dynamodb = boto3.client('dynamodb')\n for connection_id in connection_ids:\n try:\n apigatewaymanagementapi.post_to_connection(Data=data, ConnectionId=connection_id['connectionId']['S'])\n except Exception as e:\n print(e)\n # Remove connection id from DDB\n dynamodb.delete_item(\n TableName=os.environ.get('CONNECTION_TABLE_NAME'),\n Key={'connectionId': {'S': connection_id['connectionId']['S']}}\n )", "def _send_batch(self, base_url, endpoint, batch, dataset_id=None, dataset_version=None, retries=0):\n try:\n params = {'data': base64.b64encode(json.dumps(batch).encode()).decode()}\n if dataset_id:\n params['dataset_id'] = dataset_id\n params['token'] = self.token\n if dataset_version:\n params['dataset_version'] = dataset_version\n response = self.request(base_url, [endpoint], params, 'POST')\n msg = \"Sent \" + str(len(batch)) + \" items on \" + time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"!\"\n Mixpanel.LOGGER.debug(msg)\n return response\n except BaseException as be:\n Mixpanel.LOGGER.debug('Exception in _send_batch')\n Mixpanel.LOGGER.debug(be)\n Mixpanel.LOGGER.warning(\"Failed to import batch, dumping to file import_backup.txt\")\n with open('import_backup.txt', 'a+') as backup:\n json.dump(batch, backup)\n backup.write('\\n')", "def _dispatch_batches(self, base_url, endpoint, item_list, prep_args, dataset_id=None, dataset_version=None):\n pool = ThreadPool(processes=self.pool_size)\n batch = []\n\n # Decide which _prep function to use based on the endpoint\n if endpoint == 'import' or endpoint == 'import-events':\n prep_function = Mixpanel._prep_event_for_import\n elif endpoint == 'engage' or endpoint == 'import-people':\n prep_function = Mixpanel._prep_params_for_profile\n else:\n Mixpanel.LOGGER.warning(\n 'endpoint must be \"import\", \"engage\", \"import-events\" or \"import-people\", found: ' + str(endpoint))\n return\n\n if base_url == self.BETA_IMPORT_API:\n batch_size = 1000\n else:\n batch_size = 50\n\n for item in item_list:\n if prep_args is not None:\n # Insert the given item as the first argument to be passed to the _prep function determined above\n prep_args[0] = item\n params = prep_function(*prep_args)\n if params:\n batch.append(params)\n else:\n batch.append(item)\n\n if len(batch) == batch_size:\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n batch = []\n\n # If there are fewer than batch_size updates left ensure one last call is made\n if len(batch):\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n pool.close()\n pool.join()", "def batch_process(self, message_list, action, userId='me'):\n\n 
list_of_ids = []\n\n for key, value in message_list.items():\n list_of_ids.append(value)\n\n chunks = [list_of_ids[x:x+1000] for x in range(0, len(list_of_ids), 1000)]\n\n for page in range(0, len(chunks)):\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'batchModify')\n body = { \n \"ids\": chunks[page],\n \"removeLabelIds\": [\"INBOX\"],\n }\n else:\n resource = getattr(self.connection.users().messages(), 'batchDelete')\n body = { \n \"ids\": chunks[page],\n }\n\n dynamic_request = resource(userId=userId, body=body)\n response = dynamic_request.execute()\n print(f'[√] Bulk Action: SUCCESS {len(chunks[page])} Messages have been {action}d! - {page}')\n print(f'[√] Bulk Action: SUCCESS Total Number of Processed Messages: {len(list_of_ids)}')\n return True", "def beat_inbox_sms_bulk():\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()\n\n while list_of_sms_notifications:\n save_smss.apply_async((None, list_of_sms_notifications, receipt_id_sms), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: SMS receipt {receipt_id_sms} sent to in-flight.\")\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()", "def batch(self, reqs):\n return self.connection.batch_(reqs)", "def ExecuteBatchQueue(self):\n\t\tself.client.ExecuteBatch(self.batch_queue, 'https://www.google.com/m8/feeds/contacts/default/full/batch')\n\t\tself.ClearBatchQueue();", "def apns_send_bulk_message(registration_ids, data, **kwargs):\n\tsocket = _apns_create_socket(APNS_SOCKET)\n\tfor registration_id in registration_ids:\n\t\t_apns_send(registration_id, data, socket=socket, **kwargs)\n\n\tsocket.close()", "async def mass_send(self, messages: List[Sms]) -> List[int]:\n raise NotImplementedError", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def send_message_bulk(session_ids, message, status=200):\n for session_id in session_ids:\n TalkBackEvent.from_session_id(session_id).send_message(message, status)", "def send_batch(cls, subject, body, recipients, chunk_size=settings.MAILGUN_BATCH_CHUNK_SIZE):\n\n body, recipients = cls._recipient_override(body, recipients)\n responses = []\n\n recipients = iter(recipients)\n chunk = list(islice(recipients, chunk_size))\n while len(chunk) > 0:\n params = dict(\n to=chunk,\n subject=subject,\n text=body\n )\n params['recipient-variables'] = json.dumps({email: {} for email in chunk})\n responses.append(cls._mailgun_request(requests.post, 'messages', params))\n chunk = list(islice(recipients, chunk_size))\n\n return responses", "def put_ids_to_queue(ids_list):\n LOGGER.debug('pushing %s ads to the queue', len(ids_list))\n for advert_id in ids_list:\n fetch_single_advert.delay(advert_id)", "def flush_batch(self, batch):\n inserts = []\n replacements = []\n\n for action_type, data in batch:\n if action_type == processor.INSERT:\n inserts.append(data)\n elif action_type == processor.REPLACE:\n replacements.append(data)\n\n if inserts:\n write_rows(\n 
self.clickhouse,\n self.dist_table_name,\n inserts\n )\n\n if self.metrics:\n self.metrics.timing('inserts', len(inserts))\n\n if replacements:\n for key, replacement in replacements:\n self.producer.produce(\n self.replacements_topic,\n key=six.text_type(key).encode('utf-8'),\n value=json.dumps(replacement).encode('utf-8'),\n on_delivery=self.delivery_callback,\n )\n\n self.producer.flush()", "def send_to_kafka(rows):\n producer = connect_kafka_producer()\n for row in rows:\n print(row.asDict())\n producer.send(TOPIC_NAME, value=row.asDict())\n producer.flush()", "def beat_inbox_email_bulk():\n receipt_id_email, list_of_email_notifications = email_bulk.poll()\n\n while list_of_email_notifications:\n save_emails.apply_async((None, list_of_email_notifications, receipt_id_email), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: email receipt {receipt_id_email} sent to in-flight.\")\n receipt_id_email, list_of_email_notifications = email_bulk.poll()", "def process_data():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n input_file = urllib.unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key']).encode('utf-8')\n s3.download_file(input_bucket_name, input_file, input_file)\n output_file = os.path.join(output_dir, os.path.splitext(input_file)[0]+'.csv')\n parse_patient_data(input_file, output_file)\n upload_data(output_file)\n cleanup_files(input_file, output_file)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def bulk_index_records(records):\n indexer = RecordIndexer()\n\n click.echo('Bulk indexing {} records...'.format(len(records)))\n indexer.bulk_index([str(r.id) for r in records])\n indexer.process_bulk_queue()\n click.echo('Indexing completed!')", "def queue_emails(spoofs, message, count):\n\t\tqueues = []\n\t\tnumber_of_spoofs = len(spoofs)\n\t\tmessages_per_queue = count // number_of_spoofs\n\t\textra_to_distribute = count - (messages_per_queue * number_of_spoofs)\n\t\tbatch = Batch(size=count, complete=0)\n\t\tbatch.save()\n\t\tpk = batch.pk\n\n\t\t# going deep into each queue\n\t\tfor x in range(number_of_spoofs):\n\n\t\t\tspoof = spoofs[x]\n\t\t\tmessage['From'] = spoof.username\n\t\t\tqueue = Queue(spoof.username, connection=Redis())\n\t\t\tqueues.append(queue)\n\n\t\t\tfor y in range(messages_per_queue):\n\t\t\t\tqueue.enqueue_call(func=send, args=spoof.task_arguments + (message, pk))\n\n\t\t# panning across each queue\n\t\tfor x in range(extra_to_distribute):\n\t\t\tspoof = spoofs[x]\n\t\t\tmessage['From'] = spoof.username\n\t\t\tqueue = queues[x]\n\t\t\tqueue.enqueue_call(func=send ,args=(spoof.task_arguments + (message, pk)))\n\n\t\treturn pk", "def do_bulk(self, args):\n pass", "def postponed_send(self):\n\n for event in self._event_list:\n self._http_post([event], postpone=True)\n\n # clear event_list for future use\n self._event_list = []", "def ExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)" ]
[ "0.6739715", "0.66852796", "0.6401511", "0.63491935", "0.6338009", "0.63261616", "0.63134557", "0.63052964", "0.6195632", "0.6183819", "0.6173889", "0.61398053", "0.6103908", "0.6066584", "0.6057908", "0.60467565", "0.6021644", "0.6018628", "0.5985069", "0.5927403", "0.5885165", "0.5869879", "0.5855847", "0.5767411", "0.5758047", "0.57554823", "0.57550955", "0.57363623", "0.5708484", "0.5701976" ]
0.7458315
0
Method to add a user as friends, that is, to create a bidirectional link that connects the two users.
def add_friends(self, user1_index, user2_index): if user1_index >= self.num_users or user2_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user1_index} and {user2_index} were requested." ) if self.users_hat[user1_index, user2_index] == 0: self.users_hat[user1_index, user2_index] = 1 elif self.is_verbose(): self.log(f"User {user2_index} was already following user {user1_index}") if self.users_hat[user2_index, user1_index] == 0: self.users_hat[user2_index, user1_index] = 1 elif self.is_verbose(): self.log(f"User {user1_index} was already following user {user2_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()", "def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=second_user.gameplanuser)\n new_friendship.save()", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def addFriendship(self, userID, friendID):\n # adding a edge between two vertices\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend", "def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)", "def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')", "def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = 
get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))", "async def add(\n self,\n\t\tuser_id: Optional[int] = None,\n\t\ttext: Optional[str] = None,\n\t\tfollow: Optional[bool] = None,\n\t\t**kwargs\n ) -> friends.AddResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.add\", params)\n model = friends.AddResponse\n return model(**response).response", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def test_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n self.assertIs(u in f.friends.all(), True)\n self.assertIs(f in u.friends.all(), True)", "def add_friend(request, profile_pk, friend_pk):\n\n profile_object = Profile.objects.get(pk=profile_pk)\n friend_object = profile_object.get_friend_suggestions().get(pk=friend_pk)\n \n profile_object.friends.add(friend_object)\n profile_object.save()\n\n return redirect(reverse('show_profile_page', kwargs={'pk': profile_pk}))", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n 
return False\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n return True", "def add_friend(self, account):\n if not account in self.friends.all():\n self.friends.add(account)\n self.save()", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def add_friend(self, account):\n\t\tif not account in self.friends.all():\n\t\t\tself.friends.add(account)", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def accept(self):\n receiver_friend_list = FriendList.objects.get(user=self.receiver)\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender)\n sender_friend_list = FriendList.objects.get(user=self.sender)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver)\n self.is_active = False\n self.save()" ]
[ "0.7621462", "0.7019252", "0.69413483", "0.69305646", "0.69305646", "0.69305646", "0.6898999", "0.68216527", "0.67973375", "0.67016155", "0.6672114", "0.6542767", "0.6497178", "0.6497178", "0.6497178", "0.6497178", "0.6441999", "0.6441999", "0.6441999", "0.6441999", "0.6438662", "0.64343077", "0.6427127", "0.6427127", "0.6396734", "0.629882", "0.6285157", "0.616015", "0.6052063", "0.6042031" ]
0.7181159
1
Render the Lilypond music expression lily using lilypond.
def render_lily(self, lily): shasum = "%s.png" % sha(lily.encode('utf-8')).hexdigest() relfn = posixpath.join(self.builder.imgpath, 'lily', shasum) outfn = path.join(self.builder.outdir, '_images', 'lily', shasum) if path.isfile(outfn): return relfn if hasattr(self.builder, '_lilypng_warned'): return None, None music = DOC_HEAD + self.builder.config.pnglily_preamble + lily if isinstance(music, unicode): music = music.encode('utf-8') # use only one tempdir per build -- the use of a directory is cleaner # than using temporary files, since we can clean up everything at once # just removing the whole directory (see cleanup_tempdir_lily) if not hasattr(self.builder, '_lilypng_tempdir'): tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp() else: tempdir = self.builder._lilypng_tempdir tf = open(path.join(tempdir, 'music.ly'), 'w') tf.write(music) tf.close() ensuredir(path.dirname(outfn)) # use some standard lilypond arguments lilypond_args = [self.builder.config.pnglily_lilypond] #lilypond_args += ['-o', tempdir, '--png'] lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts', '-dinclude-eps-fonts', '-o', tempdir, '--png'] # add custom ones from config value lilypond_args.extend(self.builder.config.pnglily_lilypond_args) # last, the input file name lilypond_args.append(path.join(tempdir, 'music.ly')) try: p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE) except OSError, err: if err.errno != 2: # No such file or directory raise self.builder.warn('lilypond command %r cannot be run (needed for music ' 'display), check the pnglily_lilypond setting' % self.builder.config.pnglily_lilypond) self.builder._lilypng_warned = True return None, None stdout, stderr = p.communicate() if p.returncode != 0: raise LilyExtError(u'lilypond exited with error:\n[stderr]\n%s\n' '[stdout]\n%s' % (stderr.decode('utf-8'), stdout.decode('utf-8'))) shutil.copyfile(path.join(tempdir, 'music.png'), outfn) #Popen(['mogrify', '-trim', outfn], stdout=PIPE, stderr=PIPE) return relfn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self, format=\"png\"):\n from .core.transforms import lilypond\n seq = HSeq(self) | lilypond()\n\n lily_output = write_lilypond.lily_format(seq)\n if not lily_output.strip():\n #In the case of empty lily outputs, return self to get a textual display\n return self\n\n if format == \"png\":\n suffix = \".preview.png\"\n args = [\"lilypond\", \"--png\", \"-dno-print-pages\", \"-dpreview\"]\n elif format == \"svg\":\n suffix = \".preview.svg\"\n args = [\"lilypond\", \"-dbackend=svg\", \"-dno-print-pages\", \"-dpreview\"]\n\n f = tempfile.NamedTemporaryFile(suffix=suffix)\n basename = f.name[:-len(suffix)]\n args.extend([\"-o\" + basename, \"-\"])\n\n #Pass shell=True so that if your $PATH contains ~ it will\n #get expanded. This also changes the way the arguments get\n #passed in. To work correctly, pass them as a string\n p = sp.Popen(\" \".join(args), stdin=sp.PIPE, shell=True)\n stdout, stderr = p.communicate(\"{ %s }\" % lily_output)\n if p.returncode != 0:\n # there was an error\n #raise IOError(\"Lilypond execution failed: %s%s\" % (stdout, stderr))\n return None\n\n if not ipython:\n return f.read()\n if format == \"png\":\n return Image(data=f.read(), filename=f.name, format=\"png\")\n else:\n return SVG(data=f.read(), filename=f.name)", "def exec_lilypond(ly_string, filename, command):\n ly_string = '\\\\version \"2.10.33\"\\n' + ly_string\n if filename[-4:] in [\".pdf\", \".png\"]:\n filename = filename[:-4]\n try:\n f = open(filename + \".ly\", \"w\")\n f.write(ly_string)\n f.close()\n except:\n return False\n command = 'lilypond %s -dresolution=600 -o \"%s\" \"%s.ly\"' % (command, filename, filename)\n #print(\"Executing: %s\" % command)\n p = subprocess.Popen(command, shell=True).wait()\n os.remove(filename + \".ly\")\n return True", "def render(self):\n canvas_id = 'zdog_{}'.format(self.CANVAS_INDEX)\n illo_id = 'illo_{}'.format(self.CANVAS_INDEX)\n Scene.CANVAS_INDEX += 1\n\n html_lines = []\n\n js_lines = []\n\n euler = -rowan.to_euler(\n self.rotation, convention='xyz', axis_type='intrinsic')\n translation = self.translation*(1, -1, 1)\n\n pan_cfg = self.get_feature_config('pan')\n pan = pan_cfg.get('value', True) if pan_cfg is not None else False\n\n js_lines.append(\"\"\"\n let {illo_id} = new Zdog.Illustration({{\n element: '#{canvas_id}',\n zoom: {zoom},\n dragRotate: {rotation_enabled},\n rotate: {{x: {angle[0]}, y: {angle[1]}, z: {angle[2]}}},\n translate: {{x: {pos[0]}, y: {pos[1]}, z: {pos[2]}}},\n }});\n \"\"\".format(\n illo_id=illo_id, canvas_id=canvas_id, zoom=self.zoom*self.pixel_scale,\n angle=euler, pos=translation,\n rotation_enabled=('false' if pan else 'true')))\n\n config = self.get_feature_config('ambient_light')\n ambient_light = 0 if config is None else config.get('value', .4)\n\n config = self.get_feature_config('directional_light')\n directional_light = ([(0, 0, 0)] if config is None else\n config.get('value', [(0, 0, 0)]))\n directional_light = np.atleast_2d(directional_light)\n\n shapeIndex = 0\n for i, prim in enumerate(self._primitives):\n js_lines.extend(prim.render(\n rotation=self.rotation, illo_id=illo_id,\n name_suffix=i, ambient_light=ambient_light,\n directional_light=directional_light))\n\n (width, height) = map(int, self.size_pixels)\n html_lines.append(\"\"\"\n <canvas id=\"{canvas_id}\" width=\"{width}\" height=\"{height}\"></canvas>\n \"\"\".format(canvas_id=canvas_id, width=width, height=height))\n\n html_lines.append(\"\"\"<script>\n var fill_{canvas_id} = function() {{\n \"\"\".format(canvas_id=canvas_id))\n 
html_lines.append(LOCAL_HELPER_SCRIPT)\n html_lines.extend(js_lines)\n\n pan_snippet = \"\"\"\n new Zdog.Dragger({{\n startElement: {illo_id}.element,\n onDragStart: function( pointer, moveX, moveY) {{\n this.lastX = 0;\n this.lastY = 0;\n }},\n onDragMove: function( pointer, moveX, moveY ) {{\n let deltax = moveX - this.lastX;\n let deltay = moveY - this.lastY;\n let scale = 1.0/{illo_id}.zoom;\n {illo_id}.translate.x += deltax*scale;\n {illo_id}.translate.y += deltay*scale;\n this.lastX = moveX;\n this.lastY = moveY;\n }}\n }});\"\"\".format(illo_id=illo_id)\n if pan:\n html_lines.append(pan_snippet)\n\n html_lines.append(\"\"\"\n let this_canvas = document.querySelector(\"#{canvas_id}\");\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(\"\"\"\n let animate_{canvas_id} = function() {{\n if(is_in_view(this_canvas))\n {{\n {illo_id}.updateRenderGraph();\n }}\n if(document.contains(this_canvas))\n {{\n requestAnimationFrame(animate_{canvas_id});\n }}\n }};\n animate_{canvas_id}();\"\"\".format(canvas_id=canvas_id, illo_id=illo_id))\n # remove the global reference to this function after using it\n html_lines.append('fill_{canvas_id} = null;'.format(canvas_id=canvas_id))\n html_lines.append('};') # end of fill_{canvas_id}\n # now call fill_{canvas_id}, possibly after loading zdog\n html_lines.append(\"\"\"\n if (typeof Zdog == 'undefined')\n {{\n var script = document.createElement('script');\n script.addEventListener('load', fill_{canvas_id}, false);\n script.src = 'https://unpkg.com/zdog@1/dist/zdog.dist.min.js';\n document.getElementsByTagName('head')[0].appendChild(script);\n }}\n else\n fill_{canvas_id}();\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append('</script>')\n\n return '\\n'.join(html_lines)", "def render(self) -> None: # pragma: no cover\n top_level_dir = self.rendering_params['dir']\n now = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S,%f\")\n nested_dir = os.path.join(top_level_dir, f\"result_{now}\")\n os.mkdir(nested_dir)\n\n midi_path = os.path.join(nested_dir, 'music.mid')\n midi_params = self.rendering_params['midi']\n measure = self.rendering_params['measure_in_seconds']\n create_midi_from_piece(self, midi_path, measure, **midi_params)\n\n events_path = os.path.join(nested_dir, 'sinethesizer_events.tsv')\n events_params = self.rendering_params['sinethesizer']\n create_events_from_piece(self, events_path, measure, **events_params)\n\n wav_path = os.path.join(nested_dir, 'music.wav')\n create_wav_from_events(events_path, wav_path)\n\n lilypond_path = os.path.join(nested_dir, 'sheet_music.ly')\n create_lilypond_file_from_piece(self, lilypond_path)\n create_pdf_sheet_music_with_lilypond(lilypond_path)", "def asLily(self):\n n = self.getNoteName()[0].lower()\n a = self._getLilyAccidental()\n o = self._getLilyOctave()\n d = self._getLilyDuration()\n s = self._getLilyDot()\n t = self._getLilyTie()\n return \"{}{}{}{}{}{}\".format(n, a, o, d, s, t)", "def hxlexpand():\n run_script(hxlexpand_main)", "def playOutput():\n global coordinates, lastPlayedCoordinates\n\n tempDir = \".bt_temp\"\n tempSongPath = tempDir + \"/lastPlayedSong.wav\"\n\n if (coordinates == []):\n return\n\n # If there have been no changes to the canvas, don't recreate the .wav files\n if (coordinates == lastPlayedCoordinates):\n if os.path.isfile(tempSongPath):\n call(['python','PlayMelody.py',tempSongPath])\n return\n\n lex = Lexer(coordinates)\n song = lex.compose_song()\n \n # Don't create a sub directory and just make them hidden files, this way no permission error\n\n # Delete 
the old one if it exists\n if os.path.exists(tempDir):\n shutil.rmtree(tempDir)\n # Create temporary directory to store intermediate files\n os.makedirs(tempDir)\n \n \n tempSongPath = tempDir + \"/lastPlayedSong.wav\"\n if os.path.exists(tempSongPath):\n shutil.rmtree(tempSongPath)\n\n createMelody(song, tempSongPath)\n\n call(['python','PlayMelody.py',tempSongPath])\n\n lastPlayedCoordinates = coordinates", "def render_example() -> str:\n return str(<Hello name=\"World\"/>)", "def render_ldl(variables, output):\n\n f = open(output, 'w')\n\n # Include header\n f.write(\"#include \\\"ldl.h\\\"\\n\\n\")\n\n # Write ldl_lsolve\n write_ldl_lsolve(f, variables)\n\n # Write ldl_ltsolve\n write_ldl_ltsolve(f, variables)\n\n # Write ldl_dinvsolve\n write_ldl_dinvsolve(f, variables)\n\n # Write ldl_perm\n write_ldl_perm(f, variables)\n\n # Write ldl_permt\n write_ldl_permt(f, variables)\n\n f.close()", "def bidi_streaming(self) -> global___Snippet.BidiStreaming:", "def add_song():\n return render_template('pong!')", "def ly(self, l: int, lfrac: float) -> float:\n self._check_lfrac(lfrac)\n self._raise_if_not_line(l)\n result = self._read_inline(f\"ly({l},{lfrac})\")\n return result", "def render(sim_file: str, only_stuck: bool) -> None:\n import DLA\n DLA.GREEN = (0, 0, 0) # type: ignore\n DLA.WHITE = (151, 151, 151, 150) # type: ignore\n from DLA import config\n config.USE_PYGAME = True # type: ignore\n from DLA import renderer\n renderer.render(Path(sim_file), only_stuck)", "def midi_to_lilypond_note(note):\n return all_notes[note+4]", "def create_artist_new_music_line(spotify_artist_music):\n body = ''\n for item in spotify_artist_music:\n if item['thumbnail']:\n artist_string = '<p><img src=\"{}\" width=\"{}\" height=\"{}\" /> {} released on {}--{}</p>\\n'\n body += artist_string.format(item['thumbnail'][0]['url'], item['thumbnail'][0]['width'],\n item['thumbnail'][0]['height'], item['name'], item['releaseDate'], item['url'])\n return body", "def all_notes():\n \n return render_template('all_notes.html',colors=music_color,)", "def __init__(self):\n inkex.Effect.__init__(self)\n\n self.doc_center = None\n self.normal_line = {\n 'stroke': '#000000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '1' # can also be in form '2mm'\n }\n self.cut_line = {\n 'stroke': '#ff0000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '0.1' # can also be in form '2mm'\n }\n self.doted_line = {\n 'stroke': '#000000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '1', # can also be in form '2mm'\n 'stroke-linecap': 'butt',\n 'stroke-linejoin': 'miter',\n 'stroke-miterlimit': '10',\n 'stroke-dasharray': '9.883,9.883',\n 'stroke-dashoffset': '0'\n }\n\n # Define the list of parameters defined in the .inx file\n self.OptionParser.add_option(\"-t\", \"--type\", type=\"string\", dest=\"type\", default='perso',\n help=\"Type of template rendered\")\n self.OptionParser.add_option(\"-u\", \"--units\", type=\"string\", dest=\"units\", default='cm',\n help=\"User interface units\")\n self.OptionParser.add_option(\"--style\", type=\"string\", dest=\"style\", default='print',\n help=\"Style of the template\")\n self.OptionParser.add_option(\"-n\", \"--neck\", type=\"float\", dest=\"neck\", default=11,\n help=\"Width of the neck\")\n self.OptionParser.add_option(\"-s\", \"--shoulder\", type=\"float\", dest=\"shoulder\", default=44,\n help=\"Width shoulder to shoulder\")\n self.OptionParser.add_option(\"--hip\", type=\"float\", dest=\"hip\", default=89,\n 
help=\"Hip measurement\")\n self.OptionParser.add_option(\"-w\", \"--waist\", type=\"float\", dest=\"waist\", default=79,\n help=\"Waist measurement\")\n self.OptionParser.add_option(\"-c\", \"--chest\", type=\"float\", dest=\"chest\", default=97,\n help=\"Chest measurement\")\n self.OptionParser.add_option(\"--hsptochest\", type=\"float\", dest=\"hsp_chest\", default=21,\n help=\"Lenght HSP to chest\")\n self.OptionParser.add_option(\"--hsptowaist\", type=\"float\", dest=\"hsp_waist\", default=45,\n help=\"Lenght HSP to waist\")\n self.OptionParser.add_option(\"--hsptohip\", type=\"float\", dest=\"hsp_hip\", default=67,\n help=\"Lenght HSP to hip\")\n self.OptionParser.add_option(\"-b\", \"--bicep\", type=\"float\", dest=\"bicep\", default=23,\n help=\"Bicep measurement\")\n self.OptionParser.add_option(\"--upersleeve\", type=\"float\", dest=\"top_sleeve\", default=20,\n help=\"Top lenght of the sleeve\")\n self.OptionParser.add_option(\"--bottomsleeve\", type=\"float\", dest=\"bottom_sleeve\", default=17,\n help=\"Bottom lenght of the sleeve\")\n self.OptionParser.add_option(\"-e\", \"--ease\", type=\"float\", dest=\"ease\", default=5,\n help=\"Amount of ease\")\n self.OptionParser.add_option(\"--neck_front\", type=\"float\", dest=\"neck_front\", default=0,\n help=\"Height of the front neck drop\")\n self.OptionParser.add_option(\"--neck_rear\", type=\"float\", dest=\"neck_rear\", default=6,\n help=\"Height of the rear neck drop\")\n self.OptionParser.add_option(\"--shoulder_drop\", type=\"float\", dest=\"shoulder_drop\", default=3,\n help=\"height of the shoulder\")\n self.OptionParser.add_option(\"--grid\", type=\"inkbool\", dest=\"grid\", default=True,\n help=\"Display the Reference Grid \")\n self.OptionParser.add_option(\"--temp\", type=\"inkbool\", dest=\"temp\", default=True,\n help=\"Display the template\")\n self.OptionParser.add_option(\"--active-tab\", type=\"string\", dest=\"active_tab\",\n default='title', help=\"Active tab.\")", "def drawMusicLines():\n global c\n c.create_line(0 , 3, 800, 3, width=2)\n c.create_line(0 , 79, 800, 79, width=2)\n c.create_line(0 , 159, 800, 159, width=2)\n c.create_line(0 , 239, 800, 239, width=2)\n c.create_line(0 , 319, 800, 319, width=2)\n c.create_line(799 , 0, 799, 320, width=6)\n c.create_line(790 , 0, 790, 320, width=2)\n c.create_line(3, 0, 3, 320, width=2)", "def dspyRender(self):\n pass", "def bar_to_lilypond_notes(notes):\n lp_notes = []\n if notes[0] is None:\n notes = notes[1:]\n if notes[0] is None:\n lp_notes.append(\"r\")\n for n in notes:\n if n is None:\n continue\n if type(n) is list:\n lp_notes.append([midi_to_lilypond_note(x) for x in n])\n else:\n lp_notes.append(midi_to_lilypond_note(n))\n return lp_notes", "def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n 
ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def render(self):", "def mj_render(self):\n self.render(mode='human')", "def litchi(args):\n p = OptionParser(litchi.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"9x6\")\n\n if len(args) != 4:\n sys.exit(not p.print_help())\n\n datafile, bedfile, slayout, switch = args\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Synteny(fig, root, datafile, bedfile, slayout, switch=switch)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.4, 0.7, 0.82)\n\n # On the left panel, make a species tree\n fc = \"lightslategrey\"\n\n coords = {}\n xs, xp = 0.16, 0.03\n coords[\"lychee\"] = (xs, 0.37)\n coords[\"clementine\"] = (xs, 0.5)\n coords[\"cacao\"] = (xs, 0.6)\n coords[\"strawberry\"] = (xs, 0.7)\n coords[\"grape\"] = (xs, 0.8)\n xs -= xp\n coords[\"Sapindales\"] = join_nodes(root, coords, \"clementine\", \"lychee\", xs)\n xs -= xp\n coords[\"Rosid-II\"] = join_nodes(root, coords, \"cacao\", \"Sapindales\", xs)\n xs -= xp\n coords[\"Rosid\"] = join_nodes(root, coords, \"strawberry\", \"Rosid-II\", xs)\n xs -= xp\n coords[\"crown\"] = join_nodes(root, coords, \"grape\", \"Rosid\", xs, circle=False)\n\n # Names of the internal nodes\n for tag in (\"Rosid\", \"Rosid-II\", \"Sapindales\"):\n nx, ny = coords[tag]\n nx, ny = nx - 0.01, ny - 0.02\n root.text(nx, ny, tag, rotation=90, ha=\"right\", va=\"top\", color=fc)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"litchi\"\n image_name = pf + 
\".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def _latex_(self):\n p = self._weight_rat.numer()\n q = self._weight_rat.denom()\n old = s = \"\\\\begin{verbatim}\\\\end{verbatim}\"\n new = \"\"\n # s=\"\\\\text{Space of Vector-Valued harmonic weak Maass forms on }\"\n # s+=latex(self.multiplier().group)+\" \\\\text{ of weight } \\\\frac{\"+str(p)+\"}{\"+str(q)+\"}\"\n # s+=\"\\\\text{and values in } \\\\mathbb{C}\\\\left[\\\\mathbb{Z}/\"+latex(2*self.multiplier().N)+\"\\\\mathbb{Z}\\\\right]\\\\text{.}\"\n # s+=\"$ \\\\text{ The representation is }\"+latex(self.multiplier())+\"\\\\text{.}\"\n s = \"\\\\begin{verbatim}\\\\end{verbatim}\"\n s += \" Space of Vector-Valued harmonic weak Maass forms on $\"\n s += latex(self.multiplier().group()) + \"$ of weight $\\\\frac{\" + str(p) + \"}{\" + str(q) + \"}$\"\n s += \"and values in $\\\\mathbb{C}\\\\left[\\\\mathbb{Z}/\" + latex(2 * self.multiplier().N) + \"\\\\mathbb{Z}\\\\right]$. \"\n s += \"The representation is \" + self.multiplier()._latex_().replace(old, new) + \".\"\n\n return s", "def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')", "def bands_nlp() -> Language:\n nlp = English()\n nlp.add_pipe(BandNameNerPipe(nlp, MusicBand.select(), \"band_ents\"))\n\n return nlp", "def render(self,screen):\n for boids in self.boid_list:\n boids.render(screen)", "def render(self, position, dimensions, filename):\n t = position[0]\n volume = np.zeros(dimensions,dtype=np.uint8)\n # render it!\n print(t, len(self.frames), self.frames)\n for i,f in enumerate(self.frames):\n f.render( position[1:],volume[t+i,:,:] )\n # save it\n tiff.imsave(filename, volume)" ]
[ "0.62011886", "0.60662764", "0.5725134", "0.5563053", "0.5401194", "0.52068543", "0.51799726", "0.51762205", "0.5175516", "0.5059088", "0.4895712", "0.4883887", "0.48677018", "0.4829131", "0.47872004", "0.4744463", "0.47235727", "0.4710764", "0.47090602", "0.4685067", "0.46768898", "0.46594623", "0.46565187", "0.46457106", "0.46297425", "0.46215582", "0.46142733", "0.46125826", "0.45717314", "0.45708984" ]
0.7580346
0
This function places an order for "context.index" in the amount required to neutralize the beta exposure of the portfolio. Note that additional leverage in the account is taken on; however, net market exposure is reduced.
def hedge_portfolio(context, data): factors = get_alphas_and_betas(context, data) beta_exposure = 0.0 count = 0 for asset in context.portfolio.positions: if asset in factors and asset != context.index: if not np.isnan(factors[asset].beta): beta_exposure += factors[asset].beta count += 1 beta_hedge = -1.0 * beta_exposure / count dollar_amount = context.portfolio.portfolio_value * beta_hedge record(beta_hedge=beta_hedge) if not np.isnan(dollar_amount): order_target_value(context.index, dollar_amount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphas_and_betas(context, data):\r\n all_assets = context.portfolio.positions.keys()\r\n if context.index not in all_assets:\r\n all_assets.append(context.index)\r\n prices = data.history(all_assets, 'price', context.lookback, '1d')\r\n returns = prices.pct_change()[1:]\r\n # index_returns = returns[context.index]\r\n factors = {}\r\n for asset in context.portfolio.positions:\r\n try:\r\n y = returns[asset]\r\n factors[asset] = linreg(returns[context.index], y)\r\n except:\r\n log.warn(\"[Failed Beta Calculation] asset = %s\" % asset.symbol)\r\n return pd.DataFrame(factors, index=['alpha', 'beta'])", "def place_order(env, inventory_stock):\n yield env.timeout(LEAD_TIME)\n #amount = inventory_stock.capacity - inventory_stock.level\n amount = EOQ\n print('Inventory refilled by {1} products at {0} '.format(env.now, amount))\n print('Inventory Level = {}'.format(inventory_stock.capacity))\n order_arrival_time.append(env.now)\n order_amount.append(amount)\n yield inventory_stock.put(amount)", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', 
stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def update_order_index(self, index=None):\n if index is None:\n index = getattr(self, \"current_order_index\", 0)\n\n session = self.parent.session\n self.current_order_index = index\n self.current_order \\\n = session.input_spectra[self.current_order_index].copy()\n\n # Apply any RV correction.\n try:\n v = session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n v = 0\n\n self.current_order._dispersion *= (1 - v/c)\n\n # Update the view if the input settings don't match the settings used\n # to normalize the current order.\n self.check_for_different_input_settings()\n\n return None", "def beta(self, index):\n index_change = index.close.pct_change()\n beta = self.pct_change.cov(index_change) / index_change.var()\n return beta", "def initialize(context):\n # Rebalance every day, 1 hour after market open.\n set_slippage(slippage.FixedSlippage(spread=0.00))\n set_commission(commission.PerShare(cost=0.0, min_trade_cost=0.0))\n context.lookback = 60\n context.leverage = 0.02\n context.day = 1\n #context.ETFs = []\n context.market = [symbol('SPY')]\n context.bo = 1.25\n context.so = 1.25\n context.bc = 0.75\n context.sc = 0.5\n context.stocks = []\n context.initialized = False\n context.holding_book_shares = None\n context.order_hist = {}\n \n context.xlb = symbol('XLB') #sid(19654) #Materials 101\n context.xly = symbol('XLY') #sid(19662) #Consumer Discretionary 102\n context.xlf = symbol('XLF') #sid(19656) #Financials 103\n context.xlre = symbol('IYR') #sid() #Real estate 104\n context.xlp = symbol('XLP') #sid(19659) #Consumer Staples 205\n context.xlv = symbol('XLV') #sid(19661) #Health Care 206\n context.xlu = symbol('XLU') #sid(19660) #Utilities 207\n context.xtl = symbol('IYZ') #sid() #Communication Services 308\n context.xle = symbol('XLE') #sid(19655) #Energy 309\n context.xli = symbol('XLI') #sid(19657) #Industrials 310\n context.xlk = symbol('XLK') #sid(19658) #Technology 311\n \n context.ETF_lookup = {context.xlb:101, 101:context.xlb,\n context.xly:102, 102:context.xly,\n context.xlf:103, 103:context.xlf,\n context.xlre:104, 104:context.xlre,\n context.xlp:205, 205: context.xlp,\n context.xlv:206, 206: context.xlv,\n context.xlu:207, 207:context.xlu,\n context.xtl:308, 308:context.xtl,\n context.xle:309, 309:context.xle,\n context.xli:310, 310:context.xli,\n context.xlk:311, 311:context.xlk}\n\n context.ETFs = [context.xlb,\n context.xly,\n context.xlf,\n context.xlre,\n context.xlp,\n context.xlv,\n context.xlu,\n context.xtl,\n context.xle,\n context.xli,\n context.xlk\n ]", "def performance_vs_index(self, index='SPY', dateIni='Ini', dateFin='Fin'):\n if dateFin == 'Fin':\n dateFin = self.data.index[-1]\n if dateIni == 'Ini':\n dateIni = self.data.index[0]\n portfolioGains = round(self.data.loc[self.data.index[-1], 
'Profit/Loss%'], 2)\n else:\n pData = self.data.loc[dateIni:dateFin]\n pData.loc[:,'Profit/Loss'] = pData['Gains'].cumsum()\n pData.loc[:,'Profit/Loss%'] = pData['Profit/Loss'] / pData['Invested'] * 100\n portfolioGains = round(pData.loc[pData.index[-1], 'Profit/Loss%'], 2)\n indexData = yf.Ticker(index).history(start=dateIni, end=dateFin)\n indexData['Var%'] = (indexData.Close - indexData.Close[0]) / indexData.Close[0] * 100\n indexGains = round(indexData.loc[indexData.index[-1], 'Var%'], 2)\n return portfolioGains, indexGains, portfolioGains - indexGains", "def CalcEffectiveInventory(self):\r\n return (self.currentStock - self.currentOrders)", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def gbce_index(self):\n stocks_vwsp = [Stock.get_instance().get_stock_by_symbol(tr.symbol).vwsp for tr in Trade.get_instance()]\n try:\n return (reduce(operator.mul, stocks_vwsp, 1)) ** (1.0/len(stocks_vwsp))\n except ZeroDivisionError:\n return 0.0", "def place_orders(context, data):\r\n log.info(\"*********Monthly flags: %s\" % context.flags)\r\n \r\n context.sell = []\r\n context.buy = []\r\n \r\n # Go through flags to determine buy/sell signals\r\n for asset, flags in context.flags.items():\r\n # If up > down and multiple blue flags, add to buy\r\n if flags['UP'] > flags['DOWN'] and flags['UP'] > 1:\r\n context.buy.append(asset)\r\n \r\n # If down > up and multiple down flags, add to sell\r\n elif flags['DOWN'] > flags['UP'] and flags['DOWN'] > 1:\r\n 
context.sell.append(asset)\r\n \r\n # If both SPY and QQQ are buys, rebalance weightings and check components\r\n if sid(8554) in context.buy and sid(19920) in context.buy:\r\n rebalance_weightings(context)\r\n \r\n # Reset down sequence\r\n context.first_down_sequence = set()\r\n \r\n # Reset SPY and QQQ to max weightings\r\n context.target_weights[sid(8554)] = context.max_weights[sid(8554)]\r\n context.target_weights[sid(19920)] = context.max_weights[sid(19920)]\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[sid(8554)] = round(context.target_weights[sid(8554)] * context.portfolio.portfolio_value / context.price[sid(8554)])\r\n context.target_shares[sid(19920)] = round(context.target_weights[sid(19920)] * context.portfolio.portfolio_value / context.price[sid(19920)])\r\n \r\n # If not overweighting:\r\n if not context.overweighting:\r\n context.buy.remove(sid(8554))\r\n context.buy.remove(sid(19920))\r\n \r\n # Check components\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio > 1, add to buy\r\n if asset != sid(8554) and asset != sid(19920) and ratio > 1:\r\n context.buy.append(asset)\r\n \r\n # If SPY is a sell, check UP ratios for components\r\n if sid(8554) in context.sell:\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio < 1, add to sell\r\n if asset != sid(8554) and asset != sid(19920) and ratio < 1:\r\n context.sell.append(asset)\r\n \r\n \r\n \r\n # First month at end August 2017: set all other assets to max weighting, except take UP ratio of JKL to be <1 so sell 20% of weighting\r\n if context.first_iteration:\r\n log.info('First iteration')\r\n \r\n # Initialise weightings\r\n rebalance_weightings(context)\r\n context.first_iteration = False\r\n \r\n for asset, weight in context.max_weights.items(): \r\n # JKL\r\n if asset == sid(26451):\r\n context.sell.append(asset)\r\n\r\n context.target_weights[asset] = weight\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n buy_overweight = []\r\n remaining_cash = context.portfolio.cash\r\n \r\n # Buy components first (before considering overweighting QQQ/SPY)\r\n for asset in sorted(context.buy, reverse=True):\r\n \r\n # This is an up sequence so no subsequent down sequence\r\n if asset in context.first_down_sequence:\r\n context.first_down_sequence.remove(asset) \r\n \r\n # Buy 50% of weighting\r\n log.info('UP flags for %s: Buy 50 percent' % asset)\r\n extra_weight = 0.5 * context.max_weights[asset]\r\n \r\n # Do not exceed max shares by weighting, UNLESS taking from cash from components (overweighting)\r\n if context.target_weights[asset] == context.max_weights[asset] or (context.target_weights[asset] > context.max_weights[asset] and context.overweighting):\r\n buy_overweight.append(asset)\r\n \r\n elif context.target_weights[asset] + extra_weight > context.max_weights[asset]:\r\n context.target_weights[asset] = context.max_weights[asset]\r\n \r\n else:\r\n context.target_weights[asset] += extra_weight\r\n \r\n # Convert weights to number of shares\r\n old_shares = context.target_shares[asset]\r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n remaining_cash -= (context.target_shares[asset] - old_shares) * context.price[asset]\r\n \r\n for asset in buy_overweight:\r\n if remaining_cash > 0:\r\n # If first overweight or 2 assets to be overweighted, take 
50% of available cash\r\n if context.target_weights[asset] > context.max_weights[asset] or len(buy_overweight) > 1:\r\n log.info('Taking half of cash of value: %f' % (remaining_cash * 0.5))\r\n context.target_weights[asset] += 0.5 * remaining_cash / context.portfolio.portfolio_value\r\n \r\n # If second overweight, take all remaining cash\r\n else:\r\n log.info('Taking remaining of cash of value: %f' % (remaining_cash))\r\n context.target_weights[asset] += remaining_cash / context.portfolio.portfolio_value\r\n \r\n else:\r\n # If no cash, ignore\r\n log.info('UP flags for %s: No change' % asset)\r\n continue\r\n \r\n \r\n # For assets in sell list\r\n for asset in context.sell:\r\n \r\n # If asset already has 0 holdings, ignore\r\n if context.target_weights[asset] == 0:\r\n log.info('DOWN flags for %s: No change' % asset)\r\n continue\r\n \r\n # If first multiple down flags, sell 20% of UP weight\r\n elif asset not in context.first_down_sequence:\r\n log.info('First DOWN flags for %s: Sell 20 percent' % asset)\r\n context.target_weights[asset] -= 0.2 * context.max_weights[asset]\r\n context.first_down_sequence.add(asset)\r\n \r\n # If this is a subsequent down flag sequence, sell 40% of UP weight\r\n else:\r\n log.info('DOWN flags for %s: Sell 40 percent' % asset)\r\n context.target_weights[asset] -= 0.4 * context.max_weights[asset]\r\n \r\n # Ensure no short position\r\n if context.target_weights[asset] < 0:\r\n context.target_weights[asset] = 0\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n print(context.target_weights)", "def example_reward(self,stock=None, action=None, current_date = None, products=None, orders=None, procurements=None):\n\n out = 0\n for key in stock:\n out += stock[key]\n return out * -1", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n 
print(activeStockItem.Symbol)", "def Rollback(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def blank_future_eta(request):\n today = datetime.datetime.today()\n today = today.date()\n\n orders = OrderDetail.objects.filter(eta__gt=today)\n for order in orders:\n order.eta = None\n order.save()\n\n return HttpResponse('ok', mimetype='text/plain')", "def prepare_order(self, index, order_status):\n if(self.running_qty > 0 and index > 0):\n quantity = self.running_qty\n price = self.get_price_offset3(index)\n elif(self.running_qty < 0 and index < 0):\n quantity = abs(self.running_qty)\n price = self.get_price_offset3(index)\n else:\n quantity = self.ORDER_START_SIZE // 4\n price = self.get_price_offset2(index)\n if (price == None):\n return None\n else:\n return {'price': price, 'orderQty': quantity, 'side': \"Buy\" if index < 0 else \"Sell\"}", "def my_rebalance(context, data):\n freq_month = 3\n context.counter += 1\n if context.counter == freq_month:\n for stock, weight in context.weights.iteritems():\n context.counter = 0\n if data.can_trade(stock):\n order_target_percent(stock, weight)", "def rebalance(context, data):\n\n cancel_all_orders(context, data)\n sell_stocks_not_in_portfolio(context, data)\n\n LOG.info(\"rebalancing\")\n LOG.info(context.stocks)\n totals = calculate_totals(context, data)\n LOG.info(\"totals calculated: %s\" % totals)\n for stock, info in totals.items():\n order(stock, info[\"total\"])", "def bkg_subtract(self, analyte, bkg, ind=None):\n\n if 'bkgsub' not in self.data.keys():\n self.data['bkgsub'] = {}\n\n self.data['bkgsub'][analyte] = self.focus[analyte] - bkg\n\n if ind is not None:\n self.data['bkgsub'][analyte][ind] = np.nan\n\n return", "def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total = float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))", "def __sell(self, order, portfolio):\n amount = order.price * order.volume\n portfolio.remove_stock(order.symbol, order.volume)\n portfolio.add_cash(amount)\n return True", "def my_rebalance(context,data):\n log.info(\"rebalancing...\")\n context.output = pipeline_output('my_pipeline')\n log.info(\"retrieved pipeline output...\")\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \n if context.prime == False:\n order_target_percent(symbol('SPY'),1) #hold SPY as a default \n context.prime = True\n \n weight= 1.0/len(context.security_list)\n \n for stock in context.security_list:\n log.info(\"Buying %s\" % 
(stock.symbol))\n order_target_percent(stock, weight)\n \n #: Exit any positions we might have\n for stock in context.portfolio.positions:\n if data.can_trade(stock) and stock not in context.security_list:\n log.info(\"Exiting our positions on %s\" % (stock.symbol))\n order_target_percent(stock, 0)", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)", "def on_order(self, order: OrderData):\n self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()", "def 
portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def 
removeFixedEffect(self, index=None):\n if self._n_terms==0:\n pass\n if index is None or index==(self._n_terms-1):\n\n self._n_terms-=1\n F = self._F.pop() #= self.F[:-1]\n A = self._A.pop() #= self.A[:-1]\n self._A_identity.pop() #= self.A_identity[:-1]\n REML_term = self._REML_term.pop()# = self.REML_term[:-1]\n self._B.pop()# = self.B[:-1]\n self._n_fixed_effs-=F.shape[1]*A.shape[0]\n if REML_term:\n self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]\n\n pass\n elif index >= self.n_terms:\n raise Exception(\"index exceeds max index of terms\")\n else:\n raise NotImplementedError(\"currently only last term can be removed\")\n pass\n self._rebuild_indicator()\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')", "def cool_balance(index):\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_cool_out])\n - pulp.lpSum([component_input[i, t] for i in index_cool_in])\n + pulp.lpSum([storage_disch[i, t] for i in heat_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in heat_storage_names])\n + cool_unserve[t]\n - cool_dump[t]\n == forecast[\"cool_load\"][t]\n )", "def order(self, index=None):\n bfsize = card(self.basefield)\n\n if not self.ord:\n if self.ch in (2, 3):\n if bfsize == self.ch == 2:\n self.ord = self._order_2()\n elif bfsize == self.ch == 3:\n self.ord = self._order_3()\n else:\n error_message = \"no E/F_{%d} order\" % bfsize\n raise NotImplementedError(error_message)\n else:\n self.ord = self._trace_to_order(self.trace())\n\n # final result\n if index:\n # for subfield curve\n basetrace = self._order_to_trace(self.ord)\n trace, oldtrace = basetrace, 2\n for i in range(2, index + 1):\n trace, oldtrace = basetrace*trace - bfsize*oldtrace, trace\n return bfsize ** index + 1 - trace\n\n return self.ord", "def _order_cancel(self, bo):\n log.info(\"bo_blotter: order_cancel bracket order bo#%s\" % bo.ticket) \n cancelled = bo.cancel()\n return(cancelled)" ]
[ "0.5514604", "0.5256099", "0.5154788", "0.50738245", "0.50134844", "0.5007411", "0.4994111", "0.49798325", "0.4962537", "0.4952547", "0.4933376", "0.49214765", "0.49075228", "0.49000627", "0.4870517", "0.48602587", "0.4830687", "0.48236924", "0.47850198", "0.47702926", "0.47414806", "0.46885774", "0.46875164", "0.4687351", "0.4671185", "0.464341", "0.4642237", "0.46415833", "0.46382055", "0.46232736" ]
0.55305976
0
returns a dataframe of 'alpha' and 'beta' exposures for each asset in the current universe.
def get_alphas_and_betas(context, data): all_assets = context.portfolio.positions.keys() if context.index not in all_assets: all_assets.append(context.index) prices = data.history(all_assets, 'price', context.lookback, '1d') returns = prices.pct_change()[1:] # index_returns = returns[context.index] factors = {} for asset in context.portfolio.positions: try: y = returns[asset] factors[asset] = linreg(returns[context.index], y) except: log.warn("[Failed Beta Calculation] asset = %s" % asset.symbol) return pd.DataFrame(factors, index=['alpha', 'beta'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphas(portfolio_returns,risk_free,market_returns,betas):\r\n \r\n R = portfolio_returns\r\n Rf = risk_free\r\n Beta = betas\r\n Rm = market_returns\r\n alpha = R - Rf - (Beta*(Rm-Rf))\r\n \r\n return alpha", "def transparency(\n et: pd.DataFrame, alpha_by: Hashable, alpha_bounds: Optional[Tuple] = None\n) -> pd.Series:\n if alpha_by is not None:\n ref_data = et[alpha_by]\n if isinstance(alpha_bounds, tuple):\n ref_data = pd.Series(alpha_bounds)\n return encodings.data_transparency(et[alpha_by], ref_data)\n return pd.Series([0.1] * len(et), name=\"alpha\")", "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def generate_features(self):\n bars = self.portfolio.data_handler.bars.ix[:, -15:, :]\n prices = bars[\"adj_price_close\"]\n weights = np.array([1.0, -1.])\n feats = pd.DataFrame(index=bars.minor_axis)\n ts = prices.dot(weights)\n feats[\"z-score\"] = (ts.ix[-1] - ts.mean()) / ts.std()\n return feats", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def _calculate_data_quantiles(\n self, df: pd.DataFrame, alpha: List[float], legacy_interface=False\n ) -> pd.DataFrame:\n var_names = self._get_varnames(\n default=\"Quantiles\", legacy_interface=legacy_interface\n )\n var_name = var_names[0]\n\n index = pd.MultiIndex.from_product([var_names, alpha])\n pred_quantiles = pd.DataFrame(columns=index)\n for a in alpha:\n quant_a = df.groupby(level=-1, as_index=True).quantile(a)\n pred_quantiles[[(var_name, a)]] = quant_a\n\n return pred_quantiles", "def get_full_df(self):\n\n galaxies = []\n for i, gal_name in enumerate(self.filenames):\n g_df = self.galaxies[gal_name].all_particle_properties(\n ).to_pandas()\n g_df['name'] = self.names[i]\n g_df['snap'] = self.snaps[i]\n galaxies.append(g_df)\n return pd.concat(galaxies)", "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / 
df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def get_assets_data_frames(assets: list, asset_function: list, country: str, start_date: str, end_date: str) -> list:\r\n\r\n data_frames = []\r\n\r\n for asset in assets:\r\n\r\n data_frame = asset_function(asset,\r\n country=country,\r\n from_date=start_date,\r\n to_date=end_date)\r\n\r\n data_frames.append(data_frame)\r\n\r\n return data_frames", "def beta_and_alpha(self):\n # make scatter plot\n sp_temp = self.daily_returns(self.sp.rename(columns={'Adj Close': '^GSPC'}))\n symbol_temp = self.daily_returns(self.daily.rename(columns={'Adj Close': self.symbol}))\n joined = sp_temp.merge(symbol_temp, on='Date')\n\n # beta and alpha\n beta, alpha = np.polyfit(joined[\"^GSPC\"], joined[self.symbol], 1)\n beta = round(beta, 3)\n alpha = round(alpha, 5)\n if alpha > 0:\n self.buys += 1\n self.debug += '\\nAlpha > 0: buys + {}'.format(alpha)\n else:\n self.debug += '\\nAlpha < 0: {}'.format(alpha)\n\n # assuming favorable market conditions. else, it would be sells + 1.\n if beta > 1:\n self.buys += 1\n self.debug += '\\nBeta > 1: buys + {}'.format(beta)\n else:\n self.debug += '\\nBeta < 1: {}'.format(beta)\n\n # finish plotting scatter\n if self.will_plot:\n ax = joined.plot(title=self.symbol + ' vs The Market', kind = 'scatter', x='^GSPC', y=self.symbol)\n ax.set_xlabel(\"S&P 500\")\n plt.plot(joined[\"^GSPC\"], beta * joined['^GSPC'] + alpha, '-', color='r', label='Correlation')\n\n # plot expected beta (slope) of 1 and alpha (y- int.) of zero\n plt.plot(joined[\"^GSPC\"], 1 * joined['^GSPC'] + 0, '-', color='gray', label='Beta of 1')\n plt.plot(joined[\"^GSPC\"], 0 * joined['^GSPC'] + 0, '-', color='gray', label='Alpha of 0')\n plt.legend(loc='best')", "def to_abivars(self):\n abivars = dict(\n bs_calctype=1,\n bs_loband=self.bs_loband,\n #nband=self.nband,\n mbpt_sciss=self.mbpt_sciss,\n ecuteps=self.ecuteps,\n bs_algorithm=self._ALGO2VAR[self.algo],\n bs_coulomb_term=21,\n mdf_epsinf=self.mdf_epsinf,\n bs_exchange_term=1 if self.with_lf else 0,\n inclvkb=self.inclvkb,\n zcut=self.zcut,\n bs_freq_mesh=self.bs_freq_mesh,\n bs_coupling=self._EXC_TYPES[self.exc_type],\n optdriver=self.optdriver,\n )\n\n if self.use_haydock:\n # FIXME\n abivars.update(\n bs_haydock_niter=100, # No. 
of iterations for Haydock\n bs_hayd_term=0, # No terminator\n bs_haydock_tol=[0.05, 0], # Stopping criteria\n )\n\n elif self.use_direct_diago:\n raise NotImplementedError(\"\")\n\n elif self.use_cg:\n raise NotImplementedError(\"\")\n\n else:\n raise ValueError(\"Unknown algorithm for EXC: %s\" % self.algo)\n\n # Add extra kwargs\n abivars.update(self.kwargs)\n\n return abivars", "def alpha(requestContext, seriesList, alpha):\n for series in seriesList:\n series.options['alpha'] = alpha\n return seriesList", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def hedge_portfolio(context, data):\r\n factors = get_alphas_and_betas(context, data)\r\n beta_exposure = 0.0\r\n count = 0\r\n for asset in context.portfolio.positions:\r\n if asset in factors and asset != context.index:\r\n if not np.isnan(factors[asset].beta):\r\n beta_exposure += factors[asset].beta\r\n count += 1\r\n beta_hedge = -1.0 * beta_exposure / count\r\n dollar_amount = context.portfolio.portfolio_value * beta_hedge\r\n record(beta_hedge=beta_hedge)\r\n if not np.isnan(dollar_amount):\r\n order_target_value(context.index, dollar_amount)", "def predict(self, alpha=0.05):\n assert 0 < alpha < 1\n predictions = self.predictions(model=self.model)\n if len(self.models) > 0:\n self.draws = np.vstack([\n self.predictions(model=mod) for mod in self.models\n ])\n # TODO: Make this work for n > 1 outcomes -- adjust axis\n return pd.DataFrame({\n 'mean': predictions[0],\n 'lower': np.quantile(self.draws, q=alpha/2, axis=0),\n 'upper': np.quantile(self.draws, q=1-alpha/2, axis=0)\n })\n else:\n return pd.DataFrame({\n 'mean': predictions[0]\n })", "def get_assets(self):\n findstr = r'W\\.iframeInit\\({\"assets\":(\\[.*\\])'\n try:\n page = str(requests.get(self.srcpage).content, 'utf-8')\n asset_search = re.search(findstr, page)\n if asset_search:\n assets = asset_search.group(1)\n try:\n assets = json.loads(assets)\n except ValueError:\n print(\"Error loading JSON string\")\n self.assets = pd.DataFrame(assets)\n return self.assets\n else:\n raise AssetNotFoundError\n except:\n print(\"Failed to get asset information from page.\\nCheck video ID.\")", "def get_assets(symbols: List[str], search_limit: int = 100) -> Tuple:\n # 1) Get the available assets up to the specified limit\n assets = get_available_assets(search_limit)\n\n # 2) Filter out the wanted assets\n try:\n filtered_assets = filter_by_symbol(assets, symbols)\n except Exception as e:\n raise e # We may be wanting to do something about that\n\n # 3) For every selected asset, return its dataframe\n to_return = []\n for asset in filtered_assets:\n time.sleep(5)\n series = get_series(asset['id'], 'd1') # With d1 as interval, we select daily prices\n to_return.append(Asset(asset['symbol'], series))\n return tuple(to_return)", "def 
vectorized_alpha(asset, strategies):\n up = asset['forward_returns'][asset['forward_returns'] > 0]\n down = asset['forward_returns'][asset['forward_returns'] < 0]\n bh_alpha = np.sum(up) / np.abs(np.sum(down))\n\n strat_returns = asset['forward_returns'][:, np.newaxis].T * strategies\n up = strat_returns * (strat_returns[:, ] > 0)\n down = strat_returns * (strat_returns[:, ] < 0)\n strat_alpha = np.sum(up, axis=1) / np.abs(np.sum(down, axis=1))\n\n _alpha = (strat_alpha / bh_alpha) - 1\n return _alpha", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)", "def __evalAlphas(self):\n #breit wheeler\n self.__alphaObjBW = alpha(self.getMomenta('bw'),self.__config)\n self.__alphaBW = [self.__alphaObjBW(index) for index in [1,2,3]]\n #compton\n self.__alphaObjC = alpha(self.getMomenta('c'),self.__config)\n self.__alphaC = [self.__alphaObjC(index) for index in [1,2,3]]\n #breit wheeler exchange\n self.__alphaObjBWx = alpha(self.getMomenta('bwx'),self.__config)\n self.__alphaBWx = [self.__alphaObjBWx(index) for index in [1,2,3]]\n #compton exchange\n self.__alphaObjCx = alpha(self.getMomenta('cx'),self.__config)\n self.__alphaCx = [self.__alphaObjCx(index) for index in [1,2,3]]\n self.__allAlphas = [self.__alphaBW,self.__alphaC,self.__alphaBWx,self.__alphaCx]", "def iter_beta_sheets(self):\n return iter(self.beta_sheet_list)", "def weighted_returns(self):\n r = self.asset_returns.fillna(0.0)\n return pd.DataFrame({a: r[a]*self.weights[a].dropna().shift(1).fillna(0.0) for a in self.assets})", "def _alpha_stats(self, trace):\n mean = np.mean(trace['alpha'])\n sd = np.std(trace['alpha'], ddof=1)\n zscore = mean / sd\n return mean, sd, zscore", "def load_multiple_assets(exchange_ids, assets, timeframe, start, end=None):\n df = pd.DataFrame()\n for ex_id in exchange_ids:\n for asset in assets:\n fpath = get_ohlcv_fpath(asset, ex_id, timeframe)\n if os.path.exists(fpath):\n data = load_asset(fpath, start, end)\n for col in data.columns:\n df[col] = data[col]\n else:\n print(\"Fpath does not exist: {:s}\".format(str(fpath)))\n # TODO: Is this okay? How to fill in missing values? 
How to handle them?\n # df.dropna(inplace=True)\n df['utc'] = [epoch_to_utc(t) for t in df.index]\n return df", "def _calculate_alpha(self, feats):\n \n init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n forward_var = autograd.Variable(init_alphas)\n\n for feat in feats:\n alphas_t = [] # The forward variables at this timestep\n for next_tag in range(self.tagset_size):\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n trans_score = self.transition[next_tag].view(1, -1)\n next_tag_var = forward_var + trans_score + emit_score\n alphas_t.append(log_sum_exp(next_tag_var))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transition[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha", "def getInputData():\n\n # Get current allocations.\n current_alloc_dict = DataIO.getCurrentData('data/current_allocations.csv')\n\n # Get tickers and expense ratios.\n ticker_list, expense_ratio_dict = DataIO.getTickerList(\n 'data/tickers_expenses.csv')\n\n # Get raw data.\n raw_data = DataIO.getRawData(ticker_list)\n\n # Create all stock objects.\n stock_dict = {}\n for ticker in raw_data.keys():\n stock_dict[ticker] = Stock(\n raw_data[ticker], ticker, expense_ratio_dict[ticker])\n\n if not len(stock_dict.keys()):\n raise ValueError('No keys found.')\n\n # Create stock database.\n stock_db = StockDatabase(stock_dict)\n\n # Create current portfolio.\n current_portfolio = Portfolio(\n stock_db, percent_allocations_dict=current_alloc_dict)\n\n return current_portfolio, stock_db", "def ArXivEprints(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('arxiv_eprints', default)\n return [HEP.ArXivObject(i) for i in tmp]", "def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta", "def extract_exp_betas(molecule_etree):\n BETA_XPATH = 'hunterdb:ExperimentalProperties/hunterdb:Property[@hunterdb:name=\"beta_expt\"]'\n return molecule_etree.xpath(BETA_XPATH, namespaces=HUNTER_DB_NAMESPACE_DICT)", "def project(self, alpha):\n ax = alpha[0]\n ay = alpha[1]\n az = alpha[2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n return [ax, ay, az]" ]
[ "0.571466", "0.5539759", "0.5527638", "0.5383794", "0.53507555", "0.5238355", "0.51728773", "0.5134495", "0.5117016", "0.50879073", "0.50690675", "0.5064041", "0.5010054", "0.49698728", "0.49637634", "0.49520984", "0.49412426", "0.4937754", "0.4913756", "0.49073732", "0.4898681", "0.4861123", "0.4858231", "0.48548", "0.4847356", "0.48459482", "0.48316336", "0.48278168", "0.48230937", "0.48187822" ]
0.7166975
0
Removes characters listed in self.custom_chars
def _remove_custom_chars(self, text: str) -> str: patterns = "|".join([x for x in self.custom_chars]) return re.sub(patterns, "", str(text), flags=re.IGNORECASE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def strip_other_charcter():\n pass", "def remove_special_characters(string_list):", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def remove_punct(self,text):", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def _remove_unknown_characters(self, text):\n exist = []\n missing_chars = set([])\n for each_char in text:\n if each_char not in self.char_2_imgs:\n if each_char == '・':\n exist.append(each_char)\n else:\n missing_chars.add(each_char)\n else:\n exist.append(each_char)\n\n return ''.join(exist), missing_chars", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def other_chars(self):\n return [sign for sign in re.findall(r'[^\\w\\s]', self.text)]", "def remove_special_chars(self, text_list):\n return [self._remove_special_chars(text) for text in text_list]", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def remove_extra_characters(self, text):\n if text:\n parsed_text = text\n parsed_text = parsed_text.replace(\"[\", \"\")\n parsed_text = parsed_text.replace(\"]\", \"\")\n parsed_text = parsed_text.replace(\"{\", \"\")\n parsed_text = parsed_text.replace(\"}\", \"\")\n parsed_text = parsed_text.replace(\"|\", \" \")\n parsed_text = parsed_text.replace(\"-\", \"\")\n parsed_text = parsed_text.replace(\"&nbsp;\", \"\")\n parsed_text = parsed_text.replace(\":'\", \"\")\n parsed_text = parsed_text.replace(\"'\", \"\")\n parsed_text = parsed_text.replace(\"#\", \"\")\n parsed_text = parsed_text.replace(\"':\", \"\")\n parsed_text = parsed_text.replace(\"=\", \"\")\n parsed_text = parsed_text.replace(\"*\", \"\")\n parsed_text = parsed_text.replace(\"/\", \"\")\n parsed_text = parsed_text.replace(\"<--\", \"\")\n parsed_text = parsed_text.replace(\"-->\", \"\")\n parsed_text = parsed_text.replace(\"<!--\", \"\")\n parsed_text = parsed_text.replace(\">\", \"\")\n parsed_text = parsed_text.replace(\"<\", \"\")\n\n parsed_text = parsed_text.replace('__NOTOC__', '')\n\n return parsed_text", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, 
'', text)\n return text", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def _remove_diacritics(self, text: str) -> str:\n nfkd_form = unicodedata.normalize(\"NFKD\", text)\n return \"\".join([char for char in nfkd_form if not unicodedata.combining(char)])", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_non_ascii(self, words):\n\t\tnew_words = []\n\t\tfor word in words:\n\t\t\tnew_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n\t\t\tnew_words.append(new_word)\n\t\treturn new_words", "def CLEAN(text):\n return _control_char_re.sub('', text)" ]
[ "0.7372764", "0.7265707", "0.7070031", "0.6986273", "0.69196767", "0.68504214", "0.67211777", "0.66895443", "0.6674223", "0.6622369", "0.662198", "0.66078997", "0.65977836", "0.6588692", "0.65562934", "0.65343094", "0.6528675", "0.6523658", "0.6516981", "0.650481", "0.6504477", "0.64807856", "0.6475873", "0.6475873", "0.64515775", "0.64435786", "0.64316255", "0.6400398", "0.63946515", "0.6393171" ]
0.8835553
0
Removes strings starting with http
def _remove_urls(self, text: str) -> str: pattern = r"http\S+" return re.sub(pattern, " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)", "def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def remove_url(text):\n return re.sub(r'http\\S+', ' ', text)", "def remove_url(tweet):\n return re.sub(r\"http\\S+\", \"URL\", tweet)", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def remove_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', '', text)\n return text", "def remove_mask(self, string):\n caracter = (r'[.\\/-]')\n if string[0:4] != 'http':\n match = re.search(caracter, string)\n if match:\n string = re.sub(caracter, '', string)\n\n return string", "def remove_urls(text):\n pass", "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "def remocion_de_urls(self, texto):\n \n texto = re.sub(r'http\\S+', '', texto)\n return texto", "def remove_url(text):\r\n url = re.sub('https?://[A-Za-z0-9./]+', '', text)\r\n return url", "def clean(self, sub):\n sub = re.sub(r'^RT[\\s]+', '', sub)\n sub = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', sub)\n sub = re.sub(r'#', '', sub)\n sub = re.sub(r'@[A-Za-z0–9]+', '', sub) \n\n return sub", "def removeURL(text):\n text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',text)\n text = re.sub(r'#([^\\s]+)', r'\\1', text)\n return text", "def remove_url(txt):\n print(txt['fields']['tweet'])\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt['fields']['tweet']).split())", "def remove_url(sample):\n sample[\"full_text\"] = re.sub(r\"http\\S+\", \"\", sample[\"full_text\"])\n return sample", "def _remove_urls(text: str) -> str:\n pattern = r'(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?'\n\n return re.sub(pattern, '', text, flags=re.MULTILINE)", "def clean_urls(self, tweet):\n self.urls = re.findall(self.regexpForURLs, tweet)\n\n for url in self.urls:\n tweet = tweet.replace(url, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def url_at_remove(text):\n text = re.sub(r'#\\w+|@\\w+',' ',text)\n # Remove url:\n return(re.sub(r'\\bhttps?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE))", "def _remove_urls(self, doc: str):\n processed_tweet = re.sub('(https?:)?\\/\\/[\\w\\.\\/-]+', '', doc)\n return processed_tweet", "def remove_urls(self, doc):\n doc = re.sub(\n r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)'\n r'(?:[^\\s()<>]+|\\(([^\\s()<>]+|'\n r'(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|'\n r'[^\\s`!()\\[\\]{};:\\'\".,<>]))',\n '',\n doc)\n return ' '.join(doc.split())", "def remove_urls(self, tweet_text):\n\n url_free_tweet_text = \" \".join(\n re.sub(r\"http\\S+\", \"\", tweet_text).split())\n\n return url_free_tweet_text", "def removeurl(wordlist):\n newlist=[]\n for w in wordlist:\n phrases=str(w[0]).split()\n for phrase in phrases:\n if(phrase.startswith('http') is True):\n phrase=\"\"\n newlist.append((phrases,w[1])) \n return newlist", "def clean_content(content):\n content = content.strip()\n valid_words = content.split()\n valid_words = [word for word in valid_words if not word_is_url(word)]\n return \" \".join(valid_words)", "def _fix_url(url):\n\n if not url.startswith('http'):\n url = 
'http://' + url\n\n return url", "def url_removal(text):\n return re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]\\\n {2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]\\\n +|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', '', text)", "def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def clean_url(url):\n for noisy_url in noisy_urls:\n url = str(url).replace(noisy_url,\"\").lower()\n return url" ]
[ "0.78006214", "0.7698079", "0.7578181", "0.74963003", "0.7458265", "0.74112725", "0.73307616", "0.72641194", "0.72031736", "0.7188069", "0.71281874", "0.70712876", "0.7058085", "0.7049562", "0.7030817", "0.69787", "0.6963308", "0.69407505", "0.6912276", "0.68773633", "0.6809274", "0.67975545", "0.67813057", "0.67711765", "0.6759833", "0.6676425", "0.66684043", "0.6622973", "0.659795", "0.65748554" ]
0.780052
1
Removes isolated block of digits
def _remove_digit_blocks(self, text: str) -> str: return re.sub(r"\b\d+\b", " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def remove_digit(self, values, box, digit):\n values[box] = values[box].replace(digit, '')\n return values", "def remove_numbers(self, doc):\n regex = re.compile('[%s]' % re.escape(self.numbers))\n return regex.sub('', doc)", "def _remove_digits(text: str) -> str:\n table = str.maketrans('', '', digits)\n\n return text.translate(table)", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def compact(number):\n return clean(number, ' -./,').strip()", "def keep_digits(x: str) -> str:\n return \"\".join([c for c in x if c.isdigit()]).strip()", "def compact(number):\n return clean(number, ' -').strip()", "def strip_non_digits(x: str) -> str:\n exp = re.compile(\"[^\\d]+\")\n return re.sub(exp, \"\", x)", "def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"", "def delete_first_zeros(digit_with_zeros): \n \n digit_without_zeros = \"\"\n\n snap = 1\n \n d = 0\n\n for d in digit_with_zeros:\n\n if d != \"0\":\n snap = 0\n if snap == 0:\n digit_without_zeros +=d\n \n return digit_without_zeros", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_numbers(text):\n result = re.sub(r'\\d+', '', text)\n return result", "def strip_numbers(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"-?\\d+\")\n return re.sub(regex, \"\", text)", "def remove_numbers(text):\n return ''.join([i for i in text if not i.isdigit()])", "def remove_flight_numbers(text):\n return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x", "def removeNumbers(self, words):\n\t\treturn re.sub(r'\\d', '', words)", "def remove_letter(letter, strng):", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def 
strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def remove_numbers(self):\n for i in range(len(self.board.board[0])):\n while self.board.board[i].count(0) < 6:\n random_val = random.randint(0, 8)\n self.board.update_board((i, random_val), 0)", "def collapse_numbers(text: str):\n groups = re.findall(r\"[\\d|\\s]{1,}\", text)\n\n results = list()\n for numbers in groups:\n squashed = squash(numbers)\n if squashed != \"\":\n results.append(squashed)\n\n return results", "def compact(number):\n return clean(number, ' -.').upper().strip()" ]
[ "0.7002717", "0.6815302", "0.6815302", "0.6815302", "0.6682333", "0.6597962", "0.6564372", "0.6525351", "0.6459419", "0.6430528", "0.6379692", "0.63564974", "0.6303089", "0.6271742", "0.6265842", "0.6203773", "0.6179784", "0.6123884", "0.6112976", "0.610751", "0.6047446", "0.60082716", "0.6007147", "0.59223837", "0.5857433", "0.58574295", "0.58335686", "0.5807342", "0.58047295", "0.580168" ]
0.7632309
0
Removes special characters as defined by the pattern in self.special_chars_pattern
def _remove_special_chars(self, text: str) -> str: pattern = re.compile(self.special_chars_pattern) text = re.sub(pattern, " ", text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def remove_special_characters(string_list):", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def remove_special_chars(s):\n stripped = re.sub('[^\\w\\s]', ' ', s)\n stripped = re.sub('_', ' ', stripped)\n\n # Make all whitespaces only one space\n stripped = re.sub('\\s+', ' ', stripped)\n\n stripped = stripped.strip()\n\n return stripped", "def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()", "def remove_special_chars(sentence):\r\n result = re.sub(r\"[^a-zA-Z0-9.]+\", ' ', re.sub('\\.\\.+', ' ', sentence))\r\n return result", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def handle_special_symbols(text: str\n ) -> str:\n valid_special_symbols = {' ', '_'}\n\n def criteria(c: str\n ) -> str:\n return c if c.isalnum() or c in valid_special_symbols else ' '\n\n return ''.join(criteria(c) for c in list(text))", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def remove_special_chars(self, text_list):\n return [self._remove_special_chars(text) for 
text in text_list]", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def remove_special_tags(text):\n clean = re.compile('{.*?}')\n return re.sub(clean, '', text)", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def _remove_bad_chars(self, expression):\n\n bad_chars = ['\"', \"'\", '/', ',', '.', '(', ')', '—', '&', ';', '$', '%', '‘', '’', '!', '?', '«', '»', '-', '<', '>',\n '+', '#', '|', ':', '_', '°', 'ª', 'º', '*', '{', '}', '[', ']']\n\n if isinstance(expression, str):\n for char in bad_chars:\n expression = expression.replace(char, ' ')\n elif isinstance(expression, list):\n expression = [token.replace(char, '') for char in bad_chars\n for token in expression]\n else:\n raise ValueError(f'expression must be a string or list. '\n 'type {type(expression)} was passed')\n\n return expression", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "def test_special_characters(self):\n testString = sanitize('[-;]\\`{\\}')\n self.assertEqual(testString, '_________')", "def remove_special_chars(company_names):\n regex_remove_special_chars = '([\\.&,/\\'])'\n regex_replace_special_chars = '[-–]'\n regex_replace_multiple_spaces = '[\\s]{2,}'\n feature_as_list = remove_sub_string(regex_remove_special_chars, company_names, False)\n feature_as_list = remove_sub_string(regex_replace_special_chars, feature_as_list, False, \" \")\n feature_as_list = remove_sub_string(regex_replace_multiple_spaces, feature_as_list, False, \" \")\n return feature_as_list" ]
[ "0.8309218", "0.7856203", "0.7768344", "0.76876336", "0.7650726", "0.76494753", "0.76118165", "0.7510932", "0.74934494", "0.7467364", "0.74673146", "0.72453177", "0.72366244", "0.71997553", "0.7166965", "0.71665037", "0.7158556", "0.7108985", "0.70958817", "0.6979312", "0.68859047", "0.68012077", "0.6785976", "0.67122483", "0.6678896", "0.66683275", "0.66525376", "0.66440725", "0.66388977", "0.6635551" ]
0.85286283
0
Return data (tuple of classes, params) for a given host.
def get_host_data(hostname, gettype='walk'): filteredNodes = Node.objects.filter(hostname=hostname) if (filteredNodes.count() == 1): node = filteredNodes[0] exclusions = get_exclusions(node) if gettype == 'work': (classes, params) = work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'optwork': (classes, params) = optimized_work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'classwork': (classes, params) = work_tree2(node, exclusions=exclusions) return (classes, params) elif gettype == 'walk': (classes, params) = walk_tree(node, exclusions=exclusions) return (classes, params) else: return ({}, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_host_data(self):\n\n raise NotImplementedError", "def get_host_variables(self, host):\n vars = {}\n for i in self.parsers:\n vars.update(i.get_host_variables(host))\n return vars", "def loadAllHostinfo():\n hidata={}\n str=\"\"\n keytypes=loadHostinfoKeys()\n keylist=sorted(hostinfo.keys())\n keylist.remove('hostname')\n for k in keylist:\n \tstr+=\" -p %s \" % k\n f=os.popen('/app/hostinfo/bin/hostinfo --noheader --csv %s' % str)\n data=f.read()\n f.close()\n strfd=cStringIO.StringIO(data)\n reader=csv.reader(strfd)\n\n for line in reader:\n \thost=line.pop(0)\n\thidata[host]={}\n\tfor key in keylist:\n\t data=line.pop(0)\n\t if not data:\n\t \tcontinue\n\t if keytypes[key]=='list':\n\t\thidata[host][key]=data.split(',')\n\t else:\n\t\thidata[host][key]=data\n\n return hidata,keytypes", "def get_host_data_fields(self):\n\n raise NotImplementedError", "def host_info(self, host):\n\n endpoint = '/Domain/Host/Info'\n\n params = {\n 'Host' : host,\n }\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def getConfigs(self, host):\n raise \"not implemented\"", "def stats_get(self, host):\n\n s = self.get_stats(host, 'get')\n\n data = {\n 'missing_total': s['missing_total'],\n 'exists_total': s['exists_total'],\n 'current': s['current'],\n 'total': s['total']\n }\n\n return data", "def host(self, host):\n if host in self.hosts_:\n vals = defaultdict(list)\n for k, value in [(x.key.lower(), x.value) for x in self.lines_\n if x.host == host and x.key.lower() != \"host\"]:\n vals[k].append(value)\n flatten = lambda x: x[0] if len(x) == 1 else x\n return {k: flatten(v) for k, v in vals.items()}\n return {}", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def get_defaultvalues(host):\n return get_obj_defaultvalues(OBJT_HOST, host)", "def get_host_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_HOST_INFO)", "def get_dataset(data_pars=None, task_type=\"train\", **kw):\n # log(data_pars)\n data_type = data_pars.get('type', 'ram')\n cols_ref = cols_ref_formodel\n\n if data_type == \"ram\":\n # cols_ref_formodel = ['cols_cross_input', 'cols_deep_input', 'cols_deep_input' ]\n ### dict colgroup ---> list of colname\n\n cols_type_received = 
data_pars.get('cols_model_type2', {} ) ##3 Sparse, Continuous\n\n if task_type == \"predict\":\n d = data_pars[task_type]\n Xtrain = d[\"X\"]\n Xtuple_train = get_dataset_tuple(Xtrain, cols_type_received, cols_ref)\n return Xtuple_train\n\n if task_type == \"eval\":\n d = data_pars[task_type]\n Xtrain, ytrain = d[\"X\"], d[\"y\"]\n Xtuple_train = get_dataset_tuple(Xtrain, cols_type_received, cols_ref)\n return Xtuple_train, ytrain\n\n if task_type == \"train\":\n d = data_pars[task_type]\n Xtrain, ytrain, Xtest, ytest = d[\"Xtrain\"], d[\"ytrain\"], d[\"Xtest\"], d[\"ytest\"]\n\n ### dict colgroup ---> list of df\n Xtuple_train = get_dataset_tuple(Xtrain, cols_type_received, cols_ref)\n Xtuple_test = get_dataset_tuple(Xtest, cols_type_received, cols_ref)\n log2(\"Xtuple_train\", Xtuple_train)\n\n return Xtuple_train, ytrain, Xtuple_test, ytest\n\n\n elif data_type == \"file\":\n raise Exception(f' {data_type} data_type Not implemented ')\n\n raise Exception(f' Requires Xtrain\", \"Xtest\", \"ytrain\", \"ytest\" ')", "def fetch_host_caps(self, host):\n e = host.executor()\n cmd_cpuinfo = (\n 'grep', 'vendor_id', '/proc/cpuinfo', '|',\n 'sort', '|',\n 'uniq', '|',\n 'cut', '-d:', '-f2',\n )\n with e.session() as ss:\n # Find vendor\n rc, out, err = ss.run_cmd(cmd_cpuinfo)\n vendor = out.strip()\n if rc or not vendor:\n raise CpuModelError(\"Can not resolve host's cpuinfo: %s\" % err)\n\n # List cpu models\n vds_caps = host.vds_client(cmd=\"Host.getCapabilities\")\n vds_caps = dict() if not vds_caps else vds_caps\n cpu_flags = vds_caps.get(\"cpuFlags\", \"\").split(\",\")\n models = [i for i in cpu_flags if \"model_\"in i]\n if not models:\n logger.warning(\"Can not resolve host's models: %s\", err)\n models = [\n MIN_MODEL.get(self._id_to_vendor(vendor))\n ]\n logger.warning(\n \"Setting minimal cpu model for %s: %s\", vendor, models[0])\n return {\n 'models': models,\n 'vendor': vendor,\n }", "def host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}", "def _get_vm_instance_data(self, services, deployment, deployed_app):\n internal_service, external_service = self._get_internal_external_services_set(\n services\n )\n\n data = [\n VmDetailsProperty(key=\"Image\", value=self._get_image(deployment)),\n VmDetailsProperty(\n key=\"Replicas\", value=self._get_replicas(deployment, deployed_app)\n ),\n VmDetailsProperty(\n key=\"Ready Replicas\", value=self._get_ready_replicas(deployment)\n ),\n VmDetailsProperty(\n key=\"Internal IP\", value=self.get_internal_ip(internal_service)\n ),\n VmDetailsProperty(\n key=\"Internal Ports\", value=self._get_service_ports(internal_service)\n ),\n VmDetailsProperty(\n key=\"External IP\", value=self.get_external_ip(external_service)\n ),\n VmDetailsProperty(\n key=\"External Ports\",\n value=self._get_external_service_ports(external_service),\n ),\n ]\n\n return data", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def restructure_host_cpu_data(host):\n init_cpu_counts(host)\n host.sockets = len(host.nodes or [])\n host.hyperthreading = False\n host.physical_cores = 0\n if not host.cpus:\n return\n host.cpu_model = host.cpus[0].cpu_model\n cpu_list = sorted(host.cpus, key=_sort_by_coreid)\n for cpu in cpu_list:\n inode = pecan.request.dbapi.inode_get(inode_id=cpu.forinodeid)\n cpu.numa_node = inode.numa_node\n if cpu.thread == 0:\n host.physical_cores += 1\n elif cpu.thread > 0:\n host.hyperthreading = True\n function = 
cpu.allocated_function or get_default_function(host)\n host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))\n host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))", "def extract_device_information(self, host_dict):\n self.host_list = []\n if self.args.hostname is None:\n try:\n hosts_val = self.main_file[\"hosts\"]\n except KeyError as ex:\n self.logger.error(\n colorama.Fore.RED\n + \"\\nERROR occurred !! Hostname not given properly %s\" % str(ex),\n extra=self.log_detail,\n )\n # raise Exception(ex)\n except Exception as ex:\n self.logger.error(\n colorama.Fore.RED + \"\\nERROR occurred !! %s\" % str(ex),\n extra=self.log_detail,\n )\n # raise Exception(ex)\n else:\n # when group of devices are given, searching for include keyword in\n # hosts in main.yaml file\n self.get_hosts_list(hosts_val, host_dict)\n else:\n # login credentials are given from command line\n host_dict[\"0\"] = {\n \"device\": self.args.hostname,\n \"username\": self.args.login,\n \"passwd\": self.args.passwd,\n }\n self.host_list.append(self.args.hostname)", "def get_dataset(params):\r\n module_name, class_name = params.dataset.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def __getitem__(self, host):\n if IS_PY2:\n assert type(host) in (str, unicode), 'Wrong type for [host], should be a string [was {0}]'.format(\n type(host))\n else:\n assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))\n return self._scan_result['scan'][host]", "def get_host(self, host):\n for droplet in self.do.droplets:\n if droplet[\"ip_address\"] == host:\n return {\"do_{}\".format(k): v for k, v in droplet.iteritems()}\n return {}", "def host(self, host: str, fields: str = None) -> dict:\n endpoint = f\"/api/host/{host}\" if host else \"/api/host/\"\n ret = self._request(\n endpoint=endpoint,\n params={\"fields\": fields} if fields else {},\n )\n return ret", "def multiple_device_details(\n self, hosts, config_data, pre_name, action, post_name):\n res_obj = []\n self.host_list = []\n host_dict={}\n\n first_entry = hosts[0]\n if 'include' in first_entry:\n devices_file_name = first_entry['include']\n if os.path.isfile(devices_file_name):\n lfile = devices_file_name\n else:\n lfile = os.path.join(\n expanduser(get_path(\n 'DEFAULT',\n 'test_file_path')),\n devices_file_name)\n login_file = open(lfile, 'r')\n dev_file = yaml.load(login_file)\n gp = first_entry.get('group', 'all')\n\n dgroup = [i.strip().lower() for i in gp.split(',')]\n for dgp in dev_file:\n if dgroup[0].lower() == 'all' or dgp.lower() in dgroup:\n for val in dev_file[dgp]:\n hostname = list(val)[0]\n self.log_detail = {'hostname': hostname}\n if val.get(hostname) is not None and hostname not in host_dict:\n host_dict[hostname] = deepcopy(val.get(hostname))\n self.host_list.append(hostname)\n else:\n for host in hosts:\n try:\n hostname = host['device']\n self.log_detail = {'hostname': hostname}\n except KeyError as ex:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! KeyError 'device' key not found\",\n extra=self.log_detail)\n except Exception as ex:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! 
%s\" %\n ex,\n extra=self.log_detail)\n else:\n if hostname not in host_dict:\n self.host_list.append(hostname)\n host_dict[hostname] = deepcopy(host)\n\n for (hostname, key_value) in iteritems(host_dict):\n username = key_value.get('username')\n password = key_value.get('passwd')\n key_value = self.get_values(key_value)\n t = Thread(\n target=self.connect,\n args=(\n hostname,\n username,\n password,\n pre_name,\n config_data,\n action,\n post_name),\n kwargs= key_value\n )\n t.start()\n t.join()\n if action == \"snap\":\n if not self.snap_q.empty():\n res_obj.append(self.snap_q.get())\n elif action in [\"snapcheck\", \"check\"]:\n if not self.q.empty():\n res_obj.append(self.q.get())\n else:\n res_obj.append(None)\n\n return res_obj", "def get_host_info(search_keyword, starbucks_data, city_info):\n host_data = []\n\n payload = {\n \"query_type\": \"RQBXY\",\n \"pagesize\": \"20\",\n \"pagenum\": '',\n \"qii\": \"true\",\n \"cluster_state\": \"5\",\n \"need_utd\": \"true\",\n \"utd_sceneid\": \"1000\",\n \"div\": \"PC1000\",\n \"addr_poi_merge\": \"true\",\n \"is_classify\": \"true\",\n \"zoom\": \"14\",\n \"longitude\": starbucks_data['longitude'],\n \"latitude\": starbucks_data['latitude'],\n \"range\": \"1000\",\n \"city\": city_info[1][0],\n \"keywords\": search_keyword,\n }\n\n for page_num in range(1, 3):\n payload['pagenum'] = page_num\n poi_list = request_amap_poi_info(payload, 'https://www.amap.com/place/' + starbucks_data['amap_key'])\n\n if not poi_list:\n print('request host list fail with %s' % page_num)\n continue\n\n for poi in poi_list:\n if not (poi.get('longitude', '') or poi.get('latitude', '') or starbucks_data['longitude'] or starbucks_data['latitude']):\n distance = None\n else:\n distance = geo_distance(poi.get('longitude', ''), poi.get('latitude', ''),starbucks_data['longitude'], starbucks_data['latitude'])\n\n data = {\n 'starbucks_key': starbucks_data['amap_key'],\n 'keyword': search_keyword,\n 'city': poi.get('cityname'),\n 'name': poi.get('name'),\n 'longitude': poi.get('longitude'),\n 'latitude': poi.get('latitude'),\n 'address': poi.get('address'),\n 'tel': poi.get('tel'),\n 'mean_price': '',\n 'distance': distance\n }\n domain_list = poi.get('domain_list')\n for domain in domain_list:\n if domain.get('name', '') == 'price':\n price_raw = domain.get('value', '')\n # price_raw = \"<font color='#90969a'>人均:</font><font color='#f84b57'>¥</font><font color='#f84b57'>114</font>\"\n try:\n data['mean_price'] = re.findall('<.*>人均:<.*>¥<.*>([0-9]+)</font>', price_raw)[0]\n except:\n data['mean_price'] = None\n break\n host_data.append(data)\n\n print('【%s】的【%s】的周边的【%s】菜系,第【%d】页爬取完毕' % (city_info[1], starbucks_data['name'], search_keyword, page_num))\n return host_data", "def stats_search(self, host):\n\n s = self.get_stats(host, 'search')\n\n data = {\n 'query_total': s['query_total'],\n 'fetch_time_in_millis': s['query_time_in_millis'],\n 'fetch_total': s['fetch_total'],\n 'query_time_in_millis': s['fetch_time_in_millis'],\n 'open_contexts': s['open_contexts'],\n 'fetch_current': s['fetch_current'],\n 'query_current': s['query_current']\n }\n\n return data", "def get_prepared_data(cls, ext_stations=None):\n ext_stations = ext_stations or StationDAO.get_all_with_prices()\n features = (cls.get_station_features(row) for row in ext_stations)\n classes = (cls.get_category(row) for row in ext_stations)\n return features, classes", "def _nodeinfo_endpoint(host):\n zkclient = context.GLOBAL.zk.conn\n nodeinfo_zk_path = '{}/{}'.format(z.ENDPOINTS, 'root')\n for node in 
zkclient.get_children(nodeinfo_zk_path):\n if 'nodeinfo' in node and host in node:\n data, _metadata = zkclient.get(\n '{}/{}'.format(nodeinfo_zk_path, node)\n )\n return data.decode().split(':')", "def getHostInfo():", "def get_services(host):\n services = query(\"$.host.'{host}'.service\", host=host)\n return services", "def get(self, host):\n return self.__locusts__[host]" ]
[ "0.6268357", "0.5708121", "0.5704203", "0.5604825", "0.5533057", "0.5413477", "0.54038036", "0.53823394", "0.52971464", "0.5290275", "0.5289944", "0.52613753", "0.52593875", "0.5238726", "0.52148", "0.5184951", "0.5168947", "0.51548314", "0.51526666", "0.5109809", "0.5093862", "0.50382835", "0.5014954", "0.49998134", "0.4991427", "0.4991188", "0.49762976", "0.49720556", "0.4968718", "0.4963464" ]
0.652414
0
Adds a node entry definition if there is no lower depth definition. Raises RuntimeError if the depth matches.
def add_entry(self, key, value, depth): current = self.entries.get(key, None) if current is None or current.depth > depth: self.entries[key] = NodeEntry(key, value, depth) elif current.depth == depth: raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minDepth = entry.depth", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add_node(self, node):\r\n self.undeclared_nodes.append(node)", "def init_recursion_depth_entry(self):\n vcmd = (self.frame.register(self.validate_integer), '%P')\n # input validation clarification\n # https://stackoverflow.com/questions/4140437/interactively-validating-entry-widget-content-in-tkinter\n self.entries[\"ent_recursion_depth\"] = Entry(\n self.frame, width=2,\n validate='key', validatecommand=vcmd)\n self.labels[\"lbl_recursion_depth\"] = Label(\n self.frame, text=\"Recursion Depth (int)\")\n self.entries[\"ent_recursion_depth\"].grid(\n row=0, column=1, sticky=W, pady=(30, 0))\n self.labels[\"lbl_recursion_depth\"].grid(\n row=0, column=0, sticky=W, pady=(30, 0))", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def _add(self, root, element, currentDepth):\n # When adding an element from the actual node, all elements less important\n # than the actual node are ALWAYS in the right branch, but the most importants\n # are on the left branch\n if root.data < element:\n if root.left == None:\n root.left = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.left\n else:\n # print \"Going to left branch at depth\", currentDepth\n return self._add(root.left, element, currentDepth + 1)\n else:\n if root.right == None:\n # print \"Adding new right leave\", element\n root.right = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.right\n else:\n # print \"Going to right branch at depth\", currentDepth\n return self._add(root.right, element, currentDepth + 1)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add(self):\r\n value = int(self.value_entry.get())\r\n self.value_entry.delete(0, tk.END)\r\n self.value_entry.focus_force()\r\n\r\n self.root.add_node(value)\r\n self.draw_tree()", "def add_node(self, name):\n if not name in self._main_dictionary:\n self._main_dictionary[name] = set()", "def add_line_info(root_node):\n class AddLineNumbers(BottomUpVisitor):\n def __init__(self):\n BottomUpVisitor.__init__(self, strict_line_order=True, make_unique=True)\n def visit_one_node(self, node, lineno=None):\n# print(node, lineno, getattr(node, 'lineno', None))\n if not hasattr(node, 'lineno'):\n node.lineno = lineno\n else:\n if node.lineno != lineno:\n print(node, lineno, node.lineno)\n print(astor.dump(root_node))\n 
assert False\n BottomUpVisitor.visit_one_node(self, node, lineno)\n AddLineNumbers().visit(root_node)", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def add_edge(self, parent, child):\r\n if child not in self.undeclared_nodes:\r\n raise LookupError(\"Node does not exist in undeclared nodes\")\r\n tree_node_parent = self.find_node(parent)\r\n tree_node_child = TreeNode(child)\r\n tree_node_child.parent = tree_node_parent\r\n tree_node_parent.children.append(tree_node_child)\r\n self.undeclared_nodes.remove(child)", "def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def add_node(self, node: Node) -> None:\n with scandir(node.path) as it:\n for entry in it:\n if entry.name.startswith('.') or entry.name.startswith('__'):\n continue\n if entry.is_dir():\n if len(node.children) > 50:\n pass\n else:\n node.children.append(Node(node, entry))\n else:\n node.files.append(entry)\n for child in node.children:\n self.add_node(child)\n if child.depth > self.depth:\n self.depth = child.depth", "def set_recursion_depth_entry(self, recursion_depth):\n self.entries[\"ent_recursion_depth\"].delete(0, END)\n self.entries[\"ent_recursion_depth\"].insert(\n 0, str(recursion_depth))", "def add_node(self, val):\n if val not in self:\n self.setdefault(val, {})", "def definition(self):\n\n if getattr(self, \"_definition_guard\", False):\n raise NodeDefinitionError(\"node definition has a circular dependency\")\n\n if not getattr(self, \"_traits_initialized_guard\", False):\n raise NodeDefinitionError(\"node is not yet fully initialized\")\n\n try:\n self._definition_guard = True\n\n nodes = []\n refs = []\n definitions = []\n\n def add_node(node):\n for ref, n in zip(refs, nodes):\n if node == n:\n return ref\n\n # get base definition\n d = node._base_definition\n\n if \"inputs\" in d:\n # sort and shallow copy\n d[\"inputs\"] = OrderedDict([(key, d[\"inputs\"][key]) for key in sorted(d[\"inputs\"].keys())])\n\n # replace nodes with references, adding nodes depth first\n for key, value in d[\"inputs\"].items():\n if isinstance(value, Node):\n d[\"inputs\"][key] = add_node(value)\n elif isinstance(value, (list, tuple, np.ndarray)):\n d[\"inputs\"][key] = [add_node(item) for item in value]\n elif isinstance(value, dict):\n d[\"inputs\"][key] = {k: add_node(v) for k, v in value.items()}\n else:\n raise TypeError(\"Invalid input '%s' of type '%s': %s\" % (key, type(value)))\n\n if \"attrs\" in d:\n # sort and shallow copy\n d[\"attrs\"] = OrderedDict([(key, d[\"attrs\"][key]) for key in sorted(d[\"attrs\"].keys())])\n\n # get base ref and then ensure it is unique\n ref = node.base_ref\n while ref in refs:\n if re.search(\"_[1-9][0-9]*$\", ref):\n ref, i = ref.rsplit(\"_\", 1)\n i = int(i)\n else:\n i = 0\n ref = \"%s_%d\" % (ref, i + 1)\n\n nodes.append(node)\n refs.append(ref)\n definitions.append(d)\n\n return ref\n\n # add top level node\n add_node(self)\n\n # finalize, verify serializable, and return\n definition = OrderedDict(zip(refs, definitions))\n definition[\"podpac_version\"] = podpac.__version__\n json.dumps(definition, cls=JSONEncoder)\n return definition\n\n finally:\n self._definition_guard = False", "def definition(self):\n\n if getattr(self, \"_definition_guard\", False):\n raise NodeDefinitionError(\"node definition has a circular dependency\")\n\n if not getattr(self, \"_traits_initialized_guard\", False):\n raise NodeDefinitionError(\"node is not yet fully initialized\")\n\n try:\n self._definition_guard = 
True\n\n nodes = []\n refs = []\n definitions = []\n\n def add_node(node):\n for ref, n in zip(refs, nodes):\n if node == n:\n return ref\n\n # get base definition\n d = node._base_definition\n\n if \"inputs\" in d:\n # sort and shallow copy\n d[\"inputs\"] = OrderedDict([(key, d[\"inputs\"][key]) for key in sorted(d[\"inputs\"].keys())])\n\n # replace nodes with references, adding nodes depth first\n for key, value in d[\"inputs\"].items():\n if isinstance(value, Node):\n d[\"inputs\"][key] = add_node(value)\n elif isinstance(value, (list, tuple, np.ndarray)):\n d[\"inputs\"][key] = [add_node(item) for item in value]\n elif isinstance(value, dict):\n d[\"inputs\"][key] = {k: add_node(v) for k, v in value.items()}\n else:\n raise TypeError(\"Invalid input '%s' of type '%s': %s\" % (key, type(value)))\n\n if \"attrs\" in d:\n # sort and shallow copy\n d[\"attrs\"] = OrderedDict([(key, d[\"attrs\"][key]) for key in sorted(d[\"attrs\"].keys())])\n\n # get base ref and then ensure it is unique\n ref = node.base_ref\n while ref in refs:\n if re.search(\"_[1-9][0-9]*$\", ref):\n ref, i = ref.rsplit(\"_\", 1)\n i = int(i)\n else:\n i = 0\n ref = \"%s_%d\" % (ref, i + 1)\n\n nodes.append(node)\n refs.append(ref)\n definitions.append(d)\n\n return ref\n\n # add top level node\n add_node(self)\n\n # finalize, verify serializable, and return\n definition = OrderedDict(zip(refs, definitions))\n definition[\"podpac_version\"] = podpac.__version__\n json.dumps(definition, cls=JSONEncoder)\n return definition\n\n finally:\n self._definition_guard = False", "def add_item_definition():\n nonlocal guid\n nonlocal guid_stack\n nonlocal tree\n\n current_leaf_add(guid, {}, tree, guid_stack)\n guid_stack.append(guid)\n guid += 1\n\n # Wrapping this current_leaf_add is defensive coding so we don't\n # crash on malformed glm files.\n if len(full_token) > 1:\n # Do we have a clock/object or else an embedded configuration\n # object?\n if len(full_token) < 4:\n # Add the item definition.\n current_leaf_add(full_token[0], full_token[-2], tree,\n guid_stack)\n elif len(full_token) == 4:\n # We likely have an embedded/nested object.\n current_leaf_add('omfEmbeddedConfigObject',\n full_token[0] + ' ' +\n list_to_string(full_token), tree,\n guid_stack)\n else:\n # Something is wrong.\n raise UserWarning('Malformed GridLAB-D model. 
Token: {}'\n .format(' '.join(full_token)))\n\n # All done.", "def add_entry(self, new_entry):\n existing_entry = self._entries.get(new_entry.key)\n if existing_entry is not None:\n existing_entry.add_menge(new_entry.get_menge())\n for occ in new_entry.occurrences:\n existing_entry.add_occurrence(occ)\n return existing_entry\n else:\n self._entries[new_entry.key] = new_entry\n self._order.append(new_entry.key)\n return None", "def add_node(self, new_node):\n current = self.root\n\n while True:\n\n if current is None:\n current = new_node\n return\n\n if new_node.data < current.data:\n current = current.left\n else:\n current = current.right", "def addTree(self, depth, fanout):\n isSwitch = depth > 0\n if isSwitch:\n node = self.addSwitch('s%s' % self.switchNum)\n self.switchNum += 1\n for _ in range(fanout):\n child = self.addTree(depth - 1, fanout)\n self.addLink(node, child)\n else:\n node = self.addHost('h%s' % self.hostNum)\n self.hostNum += 1\n return node", "def add_node(self, node):\n index = self._node_index.setdefault(node.ntype, dict())\n if node.ext_id not in index:\n index.setdefault(node.ext_id, node)\n self._type_list.setdefault(node.ntype, list()).append(node)", "def addroot(head, curchange):\n ellipsisroots[head].add(curchange)\n # Recursively split ellipsis heads with 3 roots by finding the\n # roots' youngest common descendant which is an elided merge commit.\n # That descendant takes 2 of the 3 roots as its own, and becomes a\n # root of the head.\n while len(ellipsisroots[head]) > 2:\n child, roots = splithead(head)\n splitroots(head, child, roots)\n head = child # Recurse in case we just added a 3rd root", "def add_node(self, n):\r\n keys = self.d.keys()\r\n #check for node in graph\r\n if n not in keys:\r\n self.d.update({str(n): set()})", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(Node, self).add_node(node)", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(__class__, self).add_node(node)" ]
[ "0.55656815", "0.5325866", "0.5322028", "0.52112687", "0.5193852", "0.5172305", "0.5172305", "0.51599175", "0.5064102", "0.49744448", "0.4960249", "0.49437156", "0.49024606", "0.48839802", "0.48804682", "0.4869079", "0.48573893", "0.48446208", "0.48397067", "0.48307618", "0.48307618", "0.48272246", "0.48117214", "0.4761298", "0.475108", "0.47383538", "0.47268257", "0.4723984", "0.47208777", "0.470479" ]
0.6691731
0
Adds all the entries in objs at the current depth.
def add_entries(self, objs, keyname, valuename, depth): add_entry = self.add_entry for obj in objs: key = getattr(obj, keyname, None) if key is None: continue value = getattr(obj, valuename, None) add_entry(key, value, depth)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addAll(self, objs):\n self.getSession().add_all(objs)\n self.commit() # paranoially\n return objs", "def add(self, fetchables, depth=1):\n if fetchables:\n if isinstance(fetchables, collections.Sequence):\n for fetchable in fetchables:\n self.add(fetchable, depth)\n else:\n log.debug(\"Adding to queue: %s (depth=%s)\", fetchables, depth)\n self.q.append((fetchables, depth))", "def populate_objects(self):\n\t\t\n\t\t# Don't populate if already done\n\t\tif self.objects:\n\t\t\treturn\n\t\t\n\t\tself.object_dirs = []\n\t\tdir_regex = re.compile(\"^[0-9a-f]{2}$\")\n\t\tfile_regex = re.compile(\"^[0-9a-f]{38}$\")\n\t\t\n\t\t# Get list of object dirs\n\t\tfor o_dir in os.listdir(self.objects_root):\n\t\t\to_dir_path = os.path.join(self.objects_root, o_dir)\n\t\t\tif re.match(dir_regex, o_dir) and os.path.isdir(o_dir_path):\n\t\t\t\t# Looks like an object dir so far\n\t\t\t\tself.object_dirs.append((o_dir, o_dir_path))\n\t\t\n\t\t# Get list of object files\n\t\tfor o_dir, o_dir_path in self.object_dirs:\n\t\t\tfor o_file in os.listdir(o_dir_path):\n\t\t\t\to_file_path = os.path.join(o_dir_path, o_file)\n\t\t\t\tif re.match(file_regex, o_file) and os.path.isfile(o_file_path):\n\t\t\t\t\t# Looks like an object file so far\n\t\t\t\t\tself.objects.append(\n\t\t\t\t\t\tGitLooseObject(\n\t\t\t\t\t\t\tid = o_dir + o_file,\n\t\t\t\t\t\t\tpath = o_file_path\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def add_all(self, objects):\n self.lock.acquire()\n self.__Session.add_all(objects)\n self.__Session.commit()\n self.lock.release()", "def addObjects(self):\n\n self.root = self.addRoot()\n vTemp = transform.getOffsetPosition(self.root, [0, 1, 0])\n self.top_loc = self.addLoc(\"top\", self.root, vTemp)\n centers = [self.root, self.top_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [0, -1, 0])\n self.bottom_loc = self.addLoc(\"bottom\", self.root, vTemp)\n centers = [self.root, self.bottom_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [1, 0, 0])\n self.ext_loc = self.addLoc(\"ext\", self.root, vTemp)\n centers = [self.root, self.ext_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [-1, 0, 0])\n self.int_loc = self.addLoc(\"int\", self.root, vTemp)\n centers = [self.root, self.int_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)", "def _add_all_to_tree(elms, trie):\n for elm in elms:\n tokens = tokenize(elm.name)\n for token in tokens:\n trie.add(token, elm)", "def _iter_add(self, root):\n stack = [root]\n while stack:\n nodes = stack.pop()\n for node in nodes:\n if node in self._members:\n continue\n self._members.add(node)\n\n if isinstance(node, tf.Tensor):\n stack.append((node.op,))\n elif isinstance(node, tf.Operation):\n stack.append(node.inputs)", "def add_scene_objects(self, obj_tid_catids):\n self._scene_objects.extend(obj_tid_catids)\n # for oid, scene_object in scene.objects.items():\n # if scene_object.label in ('book', 'wall', 'floor'):\n # self._ignored_cats.add(scene_object.label)\n # continue\n # try:\n # cat = TRANSLATIONS_CATEGORIES[scene_object.label]\n # except KeyError:\n # cat = scene_object.label\n #\n # try:\n # cat_id = CATEGORIES[cat]\n # self._scene_objects.append((scene_object, idx_t, cat_id))\n # except KeyError:\n # self._ignored_cats.add(cat)", "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, 
apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()", "def addToTree(self, name, path, objtype, objs):\n\t\titem = None\n\t\timageSize = (16, 16)\n\t\til = wx.ImageList(imageSize[0], imageSize[1])\n\t\tfolderIndex = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, imageSize))\n\t\tfolderOpenIndex = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, imageSize))\n\t\tfileIndex = il.Add(wx.ArtProvider_GetBitmap(wx.ART_REPORT_VIEW, wx.ART_OTHER, imageSize))\n\n\t\t#if objtype in [\"lif\", \"lei\", \"txt\", \"ome.tif\"]:\n\t\t#\tpath = path + name\n\t\t\n\t\tfor i in range(0, len(objs)):\n\t\t\tif not path in self.items:\n\t\t\t\tself.items[path] = 1\n\t\t\telse:\n\t\t\t\tself.items[path] += 1\n\t\t\n\t\tfor i in objs:\n\t\t\tself.dataUnitToPath[i] = path\n\n\t\tif objtype == \"lsm\":\n\t\t\tif not self.lsmfiles:\n\t\t\t\tself.lsmfiles = self.tree.AppendItem(self.root, \"LSM files\")\n\t\t\t\tself.tree.SetPyData(self.lsmfiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.lsmfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.lsmfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.lsmfiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\") \n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\n\t\telif objtype in [\"txt\",\"lei\"]:\n\t\t\tif not self.leicafiles:\n\t\t\t\tself.leicafiles = self.tree.AppendItem(self.root, \"Leica files\")\n\t\t\t\tself.tree.SetPyData(self.leicafiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.leicafiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.leicafiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded) \n\n\t\t\titem = self.leicafiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\t\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\t\n\t\telif objtype == \"oif\":\n\t\t\tif not self.oiffiles:\n\t\t\t\tself.oiffiles = self.tree.AppendItem(self.root, \"Olympus files\")\n\t\t\t\tself.tree.SetPyData(self.oiffiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.oiffiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.oiffiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.oiffiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\t\n\t\telif objtype == \"pic\":\n\t\t\tif not self.bioradfiles:\n\t\t\t\tself.bioradfiles = self.tree.AppendItem(self.root, \"BioRad files\")\n\t\t\t\tself.tree.SetPyData(self.bioradfiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.bioradfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.bioradfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.bioradfiles\n\t\t\tself.tree.Expand(item)\n\t\t\n\t\telif objtype == \"hdr\":\n\t\t\tif not self.interfilefiles:\n\t\t\t\tself.interfilefiles = self.tree.AppendItem(self.root, \"Interfile files\")\n\t\t\t\tself.tree.SetPyData(self.interfilefiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.interfilefiles, folderIndex, which = 
wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.interfilefiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.interfilefiles\n\t\t\tself.tree.Expand(item)\n\t\t\t\n\t\telif objtype == \"bxd\":\n\t\t\tif not self.bxdfiles:\n\t\t\t\tself.bxdfiles = self.tree.AppendItem(self.root, \"BioImageXD files\")\n\t\t\t\tself.tree.SetPyData(self.bxdfiles, \"1\") \n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.bxdfiles\n\t\t\tself.tree.Expand(item) \n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\n\t\t\tself.tree.SetPyData(item, \"2\") \n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\telif objtype == \"bxc\":\n\t\t\tif not self.bxdfiles:\n\t\t\t\tself.bxdfiles = self.tree.AppendItem(self.root, \"BioImageXD files\")\n\t\t\t\tself.tree.SetPyData(self.bxdfiles, \"1\") \n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.bxdfiles\n\t\t\tself.tree.Expand(item)\n\t\t\t\n\t\telif objtype == \"lif\":\n\t\t\tif not self.liffiles:\n\t\t\t\tself.liffiles = self.tree.AppendItem(self.root, \"LIF files\")\n\t\t\t\tself.tree.SetPyData(self.liffiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.liffiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.liffiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.liffiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\t\n\t\telif objtype in [\"mrc\",\"st\"]:\n\t\t\tif not self.mrcfiles:\n\t\t\t\tself.mrcfiles = self.tree.AppendItem(self.root, \"MRC files\")\n\t\t\t\tself.tree.SetPyData(self.mrcfiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.mrcfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.mrcfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.mrcfiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\telif objtype == \"ome.tif\":\n\t\t\tif not self.ometiffiles:\n\t\t\t\tself.ometiffiles = self.tree.AppendItem(self.root, \"OME-TIFF files\")\n\t\t\t\tself.tree.SetPyData(self.ometiffiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.ometiffiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.ometiffiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.ometiffiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\tself.tree.Expand(item)\n\t\tselected = 0\n\t\tfor obj in objs:\n\t\t\tadded = self.tree.AppendItem(item, obj.getName())\n\t\t\t\t\n\t\t\tresampledims = obj.dataSource.getResampleDimensions()\n\t\t\tif resampledims and resampledims != (0, 0, 0):\n\t\t\t\tself.markRed([added], 
\"*\")\n\t\t\tself.tree.SetPyData(added, obj) \n\t\t\tself.tree.SetItemImage(added, fileIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t#self.tree.SetItemImage(added,folderOpenIndex,which=wx.TreeItemIcon_Expanded)\n\t\t\tself.tree.EnsureVisible(added)\n\t\t\tself.dataUnitItems.append(added)\n\t\t\t\n\t\t\tif len(self.items.keys()) == 1 and not selected:\n\t\t\t\tself.tree.UnselectAll()\n\t\t\t\tself.tree.SelectItem(added, 1)\n\t\t\t\tselected = 1\n\t\t\t\tlib.messenger.send(None, \"tree_selection_changed\", obj)\n\t\t\t\n\t\tself.tree.Expand(self.root)\n\t\tconf = Configuration.getConfiguration()\n\t\tlst = self.items.keys()\n\t\tconf.setConfigItem(\"FileList\", \"General\", lst)\n\t\tconf.writeSettings()", "def add_objects(self, objects):\n requests = []\n for obj in objects:\n requests.append({\"action\": \"addObject\", \"body\": obj})\n request = {\"requests\": requests}\n return self.batch(request)", "def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()", "def _build_tree(self, root, obj):\n\n if obj is None:\n return\n\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n element = root.add_child(attr.name)\n self._build_tree(element, getattr(obj, attr_name))\n elif isinstance(attr, XmlAttributeProperty):\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def _add_objects(self, object_list):\n\n object_types = set([t for _, t in object_list])\n if not object_types.issubset(self.types):\n # for debugging\n s = \"The types found in the problem file must be a subset of the types listed in the domain file\\n\"\n s += \"Domain types: %s\" % str(self.types) + \"\\n\"\n s += \"Problem types: %s\" % str(object_types)\n raise ValueError(s)\n\n for obj, t in object_list:\n self.objects.add(obj)\n\n if t not in self.type_to_obj:\n self.type_to_obj[t] = set([])\n self.type_to_obj[t].add(obj)\n\n self.obj_to_type[obj] = set([])\n k = t\n while k in self.parent_types:\n self.obj_to_type[obj].add(k)\n k = self.parent_types[k]", "def addtree(self, dct) -> None:\n namelst = dct['name'].split('\\\\')\n # print('nlst {}'.format(namelst))\n n_n = self\n for curname in namelst:\n nextlevel = n_n.child_dct.get(curname, None)\n if nextlevel is None:\n nextlevel = n_n.child_dct[curname] = LocNode(curname)\n n_n = nextlevel\n n_n.setval(dct)", "def union(self, *objects):\n roots = [self[x] for x in objects]\n # Find the heaviest root according to its weight.\n heaviest = max(roots, key=lambda r: self.weights[r])\n for r in roots:\n if r != heaviest:\n self.weights[heaviest] += self.weights[r]\n self.parents[r] = heaviest", "def fill_octree(self):\n if len(self.children) <= 0:\n self.generate_octants()\n for point in self.points:\n self.append_point(point)\n self.points = np.array([])", "def add_children(self, children: dict) -> None:\n for child in children:\n self.children[child.move] = child", "def __iadd__(self, obj):\n if not vedo.utils.is_sequence(obj):\n obj = [obj]\n for a in obj:\n if a:\n self.AddPart(a)\n return self", "def add_entries(self, *entries: Entry):\n for entry in entries:\n self.add_entry(entry)", "def get_children(obj):\n ret = obj.to_dict()\n if 
obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def addAll(self, *args):\n pass", "def extend(self, objects: Iterable[Any]) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.extend(list(map(panel, objects)))\n self.objects = new_objects", "def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)", "def append(self, subnodes):\n if not hasattr(subnodes, \"__iter__\"):\n subnodes = [subnodes]\n\n for subnode in subnodes:\n try:\n if not issubclass(type(subnode), pyfdt.FdtNop):\n index = self.index(subnode.name)\n item = self.pop(index)\n else:\n item = None\n except ValueError:\n item = None\n\n if isinstance(item, pyfdt.FdtNode) and isinstance(\n subnode, pyfdt.FdtNode\n ):\n item.merge(subnode)\n subnode = item\n\n super().append(subnode)", "def expand(obj):\r\n if isinstance(obj, list):\r\n for i,o in enumerate(obj):\r\n obj[i] = expand(o)\r\n elif isinstance(obj, dict):\r\n if 'paging' in obj:\r\n current = obj\r\n i = 0\r\n while 'next' in current['paging']:\r\n i += 1\r\n logger.info('...{}'.format(i))\r\n current = GraphQuery.request_until_success(\r\n current['paging']['next']\r\n )\r\n obj['data'].extend(current['data'])\r\n return obj", "def add(self, item):\r\n self.root = self.recurse_add(self.root, item)", "def add_object(self, obj):\n\t\tself.objects.append(obj)", "def addAll(self,*args, **kwargs):\n pass", "def add_ents(self, ents: Iterable['Entity']) -> None:\n ents = list(ents)\n self.entities.extend(ents)\n for item in ents:\n self.by_class[item['classname'].casefold()].add(item)\n self.by_target[item['targetname', ''].casefold() or None].add(item)\n if 'nodeid' in item:\n try:\n node_id = int(item['nodeid'])\n except (TypeError, ValueError):\n pass\n else:\n item['nodeid'] = str(self.node_id.get_id(node_id))" ]
[ "0.61764467", "0.60474694", "0.57324225", "0.56951404", "0.55937594", "0.55879956", "0.558788", "0.54297394", "0.5391662", "0.53283435", "0.53044546", "0.52996117", "0.5273503", "0.5259931", "0.52462256", "0.5240757", "0.52057797", "0.51749694", "0.5160668", "0.5155546", "0.5145461", "0.510169", "0.5095795", "0.5082552", "0.5081789", "0.5071895", "0.5062649", "0.5053789", "0.5050436", "0.4997458" ]
0.76109475
0
Returns the entries as a key => value dict.
def as_dict(self): return dict((key, value) for key, value, depth in self.entries.itervalues())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d", "def GetMap(entries):\n map = {}\n for entry in entries:\n map[entry['key']] = entry['value']\n return map", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def asDictionary (self) -> Dictionary:\n\n Logging.trace(\">>\")\n result = dict(self._keyToStringValueMap)\n Logging.trace(\"<<: %r\", result)\n return result", "def to_dict(self):\n return {key: getattr(self, key) for key in self.keys}", "def asPyDict(self):\n fieldDict = dict()\n for kvp in self.keyvaluepair_set.all():\n fieldDict[kvp.key] = kvp.value\n return fieldDict", "def items(self):\n return ((key, value) for (key, value) in zip(self.__keys, self.__vals))", "def _as_dict(self):\n return dict(self.items())", "def to_dict(self) -> Dict[str, Any]:\n\n data = self._entry.to_dict()\n del data[\"item-hash\"]\n data[\"item\"] = [self._blob.to_dict()]\n\n return data", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def _tuples_to_dict(self, tuples):\n d = {}\n for key, value in tuples:\n d[key] = value\n return d", "def entry_dict(cls, feed_entry):\n return {\n 'id': feed_entry['id'],\n 'link': feed_entry['link'],\n 'published': pd.to_datetime(feed_entry['published']),\n 'title': feed_entry['title'],\n }", "def get_dict(self):\n return {key: value for key, value in zip(self._words, self._vecs)}", "def as_dict(self):\n for k, v in zip(self._input_names, self._flattened_inputs):\n yield k, v", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def as_dict(self):\n return dict(self.items())", "def lstToDict(key, value):\n return dict(zip(key, value))", "def dict() -> Dict[str, Pin]:", "def to_dict(self):\r\n try:\r\n # Create the dictionary, converting each attribute to a\r\n # string.\r\n dict_entry = {}\r\n dict_entry[\"id\"] = str(self.id)\r\n dict_entry[\"title\"] = str(self.title)\r\n dict_entry[\"date\"] = str(self.date)\r\n dict_entry[\"time\"] = str(self.time)\r\n dict_entry[\"datetime\"] = str(self.datetime)\r\n dict_entry[\"duration\"] = str(self.duration)\r\n dict_entry[\"notes\"] = str(self.notes)\r\n dict_entry[\"recurring\"] = str(self.recurring)\r\n dict_entry[\"rec_interval\"] = (\r\n io_utils.build_dict_string(self.rec_interval))\r\n dict_entry[\"rec_total\"] = str(self.rec_total)\r\n dict_entry[\"rec_child_seq\"] = str(self.rec_child_seq)\r\n dict_entry[\"rec_parent\"] = str(self.rec_parent)\r\n dict_entry[\"info\"] = io_utils.build_dict_string(self.info)\r\n return dict_entry\r\n except Exception as err:\r\n _z_exc(\"logentry.py/to_dict\", err)\r\n # end try\r", "def entries(self):\n if self.preload_metadata and not self._entries:\n self._entries = dict((self._decode_name(entry.key), entry)\n for entry in self.bucket.list())\n return self._entries", "def asdict():\n pass", "def _to_dict_tree(self):\n return DictTree(self.entries)", "def items(self):\n return list(zip(self.keys(), self.values()))", "def items(self):\n return list(zip(self.keys(), self.values()))", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n 
return self.key_dict.values()", "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "def make_dict(keys, values):\n\n return dict(zip(keys, values))", "def dictionary(self):\n data = {}\n for i, col in enumerate(self.columns):\n key = col.get_display_tag(sort_attributes=True)\n if key and (not key in data) and (i < len(self.values)):\n data[key] = self.values[i]\n return data", "def from_thread_result_to_dictionary(returned_result):\n keys = []\n values = []\n\n for returned_result_item in returned_result:\n keys.append(returned_result_item[0])\n values.append(returned_result_item[1])\n\n dictionary = dict(zip(keys, values))\n return dictionary", "def value_map(self):\n return {attr: val for attr, val in zip(self.__slots__, self._values(to_str=True))}" ]
[ "0.7613547", "0.7377054", "0.67987955", "0.63767034", "0.6352516", "0.6342387", "0.63202107", "0.6266719", "0.62579256", "0.6247313", "0.623333", "0.6207049", "0.62052894", "0.61866677", "0.61613494", "0.61424756", "0.613092", "0.61234504", "0.6120027", "0.6097869", "0.60933405", "0.6086907", "0.6079275", "0.6079275", "0.6063583", "0.6059403", "0.6043224", "0.60295224", "0.60128665", "0.59941995" ]
0.73771703
1
Determine if a sysfs_gpu_name file indicates an AMD device
def _is_amd(sysfs_gpu_name): with open(sysfs_gpu_name) as src: return src.read().strip() == 'amdgpu'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if pydev_device.get(\"DM_VG_NAME\") or pydev_device.get(\"DM_LV_NAME\"):\n # Skip LVM devices\n return False\n if constants.DEVICE_NAME_MPATH in pydev_device.get(\"DM_NAME\", \"\") and pydev_device.get(\"DM_PART\", \"\"):\n # Skip mpath partition devices\n return False\n if pydev_device.get(\"ID_FS_TYPE\") == constants.DEVICE_FS_TYPE_MPATH:\n # Skip mpath member devices\n return False\n id_path = pydev_device.get(\"ID_PATH\", \"\")\n if \"iqn.\" in id_path or \"eui.\" in id_path:\n # Skip all iSCSI devices, they are links for volume storage.\n # As per https://www.ietf.org/rfc/rfc3721.txt, \"iqn.\" or \"edu.\"\n # have to be present when constructing iSCSI names.\n return False\n if ((\"-fc-\" in id_path or \"-lun-\" in id_path) and\n is_valid_multipath(pydev_device.get('DEVNAME'))):\n return False\n if pydev_device.get(\"ID_VENDOR\") == constants.VENDOR_ID_LIO:\n # LIO devices are iSCSI, should be skipped above!\n LOG.error(\"Invalid id_path. Device %s (%s) is iSCSI!\" %\n (id_path, pydev_device.get('DEVNAME')))\n return False\n return True", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def is_gpu_device(self, device):\n return device in self._gpu_devices", "def is_cuda_device(device):\n\treturn 'cuda' in str(device)", "def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)", "def _amd_index(sysfs_gpu_name):\n drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]\n return drop_prefix.split('/')[0]", "def is_cambrionix(device_dict):\n return device_dict.get('_name') in usb_config.CAMBRIONIX_NAMES", "def gpu_availability():\n # assume if using tensorflow-gpu, then Nvidia GPU is available\n if is_built_with_cuda():\n return len(tf.config.list_physical_devices(\"GPU\")) > 0\n else:\n return False", "def isa(device_name):\n\n if not device_name:\n raise DmDeviceError(_(\"No device name given.\"))\n if device_name != os.path.basename(device_name):\n msg = _(\"Invalid device name %r given.\") % (device_name)\n raise DmDeviceError(msg)\n\n bd_dir = os.sep + os.path.join('sys', 'block', device_name)\n if not os.path.exists(bd_dir):\n return False\n\n dm_dir = os.path.join(bd_dir, 'dm')\n if not os.path.exists(dm_dir):\n return False\n\n return True", "def test_change_name_of_the_devicefalse():", "def _IsDevice(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(file_attribute_flags & pyfsntfs.file_attribute_flags.DEVICE)", "def is_booted_storage_device(disk):\n cmdline = (\"grep -w /ahcexport /proc/mounts | cut -d ' ' -f 1 | \"\n \"sed -e 's/[0-9]*//g'\")\n if '/dev/' not in disk:\n disk = '/dev/%s' % disk\n grep_cmd = subprocess.Popen(cmdline,\n shell=True, stdout=subprocess.PIPE)\n for booted_disk in grep_cmd.stdout:\n booted_disk = booted_disk.decode(errors='ignore')\n booted_disk = booted_disk.rstrip('\\n').strip()\n if booted_disk == disk:\n return True\n return False", "def find_iio_device_name(self):\n self.iio_device_dir()\n self.console.runcmd(f\"cat name\", expected=\"\\r\\n\")\n iio_device_name = 
self.console.output()\n return iio_device_name", "def test_MCE_sysfs_initialized(self):\n num_of_mc_folders = self.get_num_of_mc_folders()\n code, num_cpus, err = systeminfo.Run([\"nproc\"])\n if int(num_of_mc_folders) == int(num_cpus):\n self.log.info(\"MCE sysfs device initialization successful\")\n else:\n self.fail(\"MCE sysfs device initialization failed\")", "def _on_gpu(self) -> bool:\n return self._current_device_index != CPU_INDEX", "def additional_capability_gpu_drivers_installed(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"additional_capability_gpu_drivers_installed\")", "def _get_available_gpus():\r\n #global _LOCAL_DEVICES\r\n if tfback._LOCAL_DEVICES is None:\r\n devices = tf.config.list_logical_devices()\r\n tfback._LOCAL_DEVICES = [x.name for x in devices]\r\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def is_filesystem_enabled(dbapi, host_id_or_uuid, fs_name):\n filesystems = dbapi.host_fs_get_by_ihost(host_id_or_uuid)\n for fs in filesystems:\n if fs.name == fs_name:\n return True\n return False", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tf_back._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tf_back._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tf_back._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def GetGPU():\n return option['device_id']", "def _get_device_id() -> str:\n with open(\"/proc/cpuinfo\", \"r\") as f:\n for line in f.readlines():\n if line.startswith('Serial'):\n return line.split(':')[1].strip()\n return 'N/A'", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def export_gpu(entity=None): \n\tstatus = False \n\texportGrp = config.geoGrp\n\tres = entity.task_res()\n\tlibPath = entity.libPath()\n\n\tif res: \n\t\tabcName = entity.libName(config.libName.get('gpu'), res, ext='abc')\n\n\t\t# name without ext \n\t\tbasename = os.path.splitext(abcName)[0]\n\t\t\n\t\tgpuName = '{0}/{1}'.format(libPath, abcName)\n\n\t\tstart = pub_utils.file_time(gpuName)\n\n\t\t# export GPU command \n\t\tresult = maya_utils.exportGPUCacheGrp(exportGrp, libPath, basename, time='still')\n\t\t\n\t\tend = 
pub_utils.file_time(gpuName)\n\t\tsuccess = pub_utils.is_file_new(start, end)\n\n\t\tif success: \n\t\t\treturn True, 'Success %s' % gpuName\n\n\t\telse: \n\t\t\treturn False, 'Failed to export Gpu %s' % gpuName\n\n\telse: \n\t\treturn False, 'No res found'", "def test_change_name_of_the_devicetrue():", "def isOnNao():\n szCpuInfo = \"/proc/cpuinfo\";\n if not os.path.exists( szCpuInfo ): # already done by the getFileContents\n return False;\n szAllFile = getFileContents( szCpuInfo, bQuiet = True );\n if( szAllFile.find( \"Geode\" ) == -1 and szAllFile.find( \"Intel(R) Atom(TM)\" ) == -1 ):\n return False;\n return True;" ]
[ "0.6442702", "0.6077788", "0.60640377", "0.60526884", "0.6037231", "0.6019427", "0.601595", "0.599092", "0.5918574", "0.5823835", "0.5786603", "0.5764074", "0.5732489", "0.5730832", "0.5723959", "0.5701175", "0.56283104", "0.5625314", "0.56229156", "0.56229156", "0.5601314", "0.5579826", "0.55786043", "0.5567019", "0.55604476", "0.5535356", "0.55134195", "0.54809695", "0.54776216", "0.5472382" ]
0.83339846
0
Determine the gpu index given a sysfs_gpu_name
def _amd_index(sysfs_gpu_name): drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):] return drop_prefix.split('/')[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def deviceid(gpu):\n\n # Return if this is already a torch device\n # pylint: disable=E1101\n if isinstance(gpu, torch.device):\n return gpu\n\n # Always return -1 if gpu is None or an accelerator device is unavailable\n if gpu is None or not Models.hasaccelerator():\n return -1\n\n # Default to device 0 if gpu is True and not otherwise specified\n if isinstance(gpu, bool):\n return 0 if gpu else -1\n\n # Return gpu as device id if gpu flag is an int\n return int(gpu)", "def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')", "def get_cuda_device(minor_idx):\n\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return 0\n\n for i in range(num_devices):\n output = subprocess.check_output([\"nvidia-smi\", '-q', '-i', str(i)])\n output_list = output.decode(\"utf-8\").split('\\n')\n output_list = [item for item in output_list if 'Minor' in item]\n num = int(output_list[0].split(':')[-1])\n if num == minor_idx:\n return i\n return 0", "def GetGPU():\n return option['device_id']", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def get_gpus():\n try:\n re = subprocess.check_output([\"nvidia-smi\", \"-L\"], universal_newlines=True)\n except OSError:\n return []\n return range(len([i for i in re.split('\\n') if 'GPU' in i]))", "def _current_device_index(self) -> int:\n device = PArray._get_current_device()\n if device is None: # not called inside current task\n return self._coherence.owner\n elif device.architecture == cpu:\n return CPU_INDEX\n else:\n # assume GPU here, won't check device.architecture == gpu\n # to avoid import `gpu`, which is slow to setup.\n return device.index", "def _get_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, \"\n f\"but only {n_gpu} are available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n self.logger.info(f'Using device: {device}, {list_ids}')\n return device, list_ids", "def get_gpu_utilization(gpu_num = None, verbose=False):\n if gpu_num != None:\n check_num(gpu_num)\n if verbose:\n cmd = \"nvidia-smi --query-gpu=index,gpu_name,gpu_bus_id,utilization.gpu,memory.used --format=csv\"\n res = str(subprocess.check_output(cmd, shell=True))\n [print(a) for a in res.split('\\\\n')[:-1]]\n cmd = 
\"nvidia-smi --query-gpu=utilization.gpu,memory.used --format=csv,nounits\"\n res = str(subprocess.check_output(cmd, shell=True))\n res= res.split('\\\\n')\n if gpu_num== None:\n return array([list(map(int,a.split(','))) for a in res[1:-1]])\n else:\n return array(list(map(int,res[gpu_num+1].split(','))))", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def device_index(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_index\")", "def get_gpu_count():\n\n gpu_count = 0\n\n env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if env_cuda_devices is not None:\n assert isinstance(env_cuda_devices, str)\n try:\n if not env_cuda_devices:\n return 0\n gpu_count = len(\n [x for x in env_cuda_devices.split(',') if int(x) >= 0])\n logger.info(\n 'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))\n except:\n logger.info('Cannot find available GPU devices, using CPU now.')\n gpu_count = 0\n else:\n try:\n gpu_count = str(subprocess.check_output([\"nvidia-smi\",\n \"-L\"])).count('UUID')\n logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))\n except:\n logger.info('Cannot find available GPU devices, using CPU now.')\n gpu_count = 0\n return gpu_count", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])", "def get_gpu_count():\n\n gpu_count = 0\n\n env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if env_cuda_devices is not None:\n assert isinstance(env_cuda_devices, str)\n try:\n if not env_cuda_devices:\n return 0\n gpu_count = len(\n [x for x in env_cuda_devices.split(',') if int(x) >= 0])\n logger.info(\n 'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))\n except:\n logger.info(\n 'Cannot find available GPU devices, using CPU or other devices now.'\n )\n gpu_count = 0\n else:\n try:\n gpu_count = str(subprocess.check_output([\"nvidia-smi\",\n \"-L\"])).count('UUID')\n logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))\n except:\n logger.info(\n 'Cannot find available GPU devices, using CPU or other devices now. 
(Please check whether you can execute `nvidia-smi` command.)'\n )\n gpu_count = 0\n return gpu_count", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def get_device_str(device_id, num_gpus):\n if num_gpus == 0:\n return \"/cpu:0\"\n device_str_output = \"/gpu:%d\" % (device_id % num_gpus)\n return device_str_output", "def get_free_gpu_memory(cuda_device_index):\n if sys.platform == \"darwin\":\n # No GPUs on darwin...\n return 0\n result = sp.check_output('nvidia-smi --query-gpu=memory.free '\n '--format=csv,nounits,noheader',\n shell=True)\n result = result.decode('utf-8').split('\\n')[:-1]\n log.verbose(f'The system has {len(result)} gpu(s).')\n free_mem = int(result[cuda_device_index])\n log.info(f'The {cuda_device_index}-th GPU has {free_mem} MB free.')\n if cuda_device_index >= len(result):\n raise ValueError(f\"Couldn't parse result for GPU #{cuda_device_index}\")\n return int(result[cuda_device_index])", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")", "def get_device(i=0):\n if torch.cuda.is_available():\n return torch.device(\"cuda:%d\" % i)\n else:\n return torch.device(\"cpu\")", "def get_list_comp_ind(gpu):\n if gpu not in [0, 1, 2, 3, -1]:\n print('Your gpu index is not correct, check again')\n quit()\n data_dir = '/home/sr365/Bruce/cvdata'\n ind_list = []\n for file in os.listdir(data_dir):\n #print(file)\n # Check if this is a comp file\n if not file.endswith('.npy') or (not file[:-4].isdigit()):\n print('This file is {}, does not satisfy requirement, continue'.format(file))\n continue\n ind = int(file[:-4])\n #print('current comp ind is {}'.format(ind))\n ind_list.append(ind)\n #print(ind_list)\n length = len(ind_list)\n print(length)\n # If GPU == -1, return all list values\n if gpu == -1:\n return ind_list\n gpu_specific_list = ind_list[gpu*int(length / 4):(gpu+1)*int(length / 4)]\n print(len(gpu_specific_list))\n return gpu_specific_list", "def get_device_index(self, chip_name):\n index = self._dll.JLINKARM_DEVICE_GetIndex(chip_name.encode('ascii'))\n\n if index <= 0:\n raise errors.JLinkException('Unsupported device selected.')\n\n return index", "def check_gpu(self, values):\n try:\n process = subprocess.Popen(['nvidia-smi', '--query-gpu=name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.current,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used', '--format=csv'], stdout=subprocess.PIPE)\n out_str, _ = process.communicate()\n gpu_strs = out_str.split('\\n')\n\n # Get rid of the column headers.\n if len(gpu_strs) > 0:\n gpu_strs = gpu_strs[1:]\n\n # Process each GPU string.\n multi_gpu = len(gpu_strs) > 1\n gpu_index = 1\n for gpu_str in gpu_strs:\n out = gpu_str.split(',')\n if len(out) > 1:\n if multi_gpu:\n 
values[keys.KEY_GPUX_NAME.replace('X', str(gpu_index))] = out[0].strip(' \\t\\n\\r')\n values[keys.KEY_GPUX_TEMPERATURE.replace('X', str(gpu_index))] = int(out[6].strip(' \\t\\n\\r'))\n values[keys.KEY_GPUX_PERCENT.replace('X', str(gpu_index))] = int(out[7].strip(' \\t\\n\\r%%s'))\n gpu_index = gpu_index + 1\n else:\n values[keys.KEY_GPU_NAME] = out[0].strip(' \\t\\n\\r')\n values[keys.KEY_GPU_TEMPERATURE] = int(out[6].strip(' \\t\\n\\r'))\n values[keys.KEY_GPU_PERCENT] = int(out[7].strip(' \\t\\n\\r%%s'))\n except:\n logging.error(\"Error collecting GPU stats.\")", "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def find_iio_device_name(self):\n self.iio_device_dir()\n self.console.runcmd(f\"cat name\", expected=\"\\r\\n\")\n iio_device_name = self.console.output()\n return iio_device_name" ]
[ "0.68445116", "0.6772102", "0.6700925", "0.6621827", "0.65894985", "0.6331592", "0.62258136", "0.62258136", "0.62258136", "0.61923295", "0.61809945", "0.6132849", "0.6131118", "0.61252695", "0.6123489", "0.6101626", "0.6023584", "0.6001534", "0.5931374", "0.59023625", "0.5886564", "0.5853852", "0.5853852", "0.5853852", "0.5814684", "0.5797789", "0.5765374", "0.57587636", "0.57504016", "0.5730266" ]
0.786728
0
Configures logging. logging_config.json should have been placed in the directory AUTOMINE_LOG_DIR, to which this process must have read and write access
def _configure_logger(): try: log_dir = os.environ['AUTOMINE_LOG_DIR'] log_name = _log_name() cfg_path = os.path.join(log_dir, 'logging_config.json') with open(cfg_path) as src: cfg = json.load(src) handlers = cfg.get('handlers') for handler in iter(handlers.values()): filename = handler.get('filename') if filename: filename = filename.replace('{{AUTOMINE_LOG_DIR}}', log_dir) filename = filename.replace('{{__name__}}', log_name) handler['filename'] = filename loggers = cfg.get('loggers') if '__name__' in loggers: loggers[log_name] = loggers.pop('__name__') # add logging to the console if env var is set log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ if log_to_console and 'console' in handlers: logger_handlers = loggers[log_name].get('handlers') if logger_handlers: logger_handlers.append('console') dictConfig(cfg) except Exception as err: # pylint: disable=broad-except logging.basicConfig() raise err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logging():\n name_json = 'logging_config.json'\n path_json = os.path.join(os.path.dirname(__file__), name_json)\n with open(path_json, 'r') as f_json:\n dict_config = json.load(f_json)\n logging.config.dictConfig(dict_config)", "def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):\n log_config = Path(log_config)\n if log_config.is_file():\n config = read_json(log_config)\n # modify logging paths based on run config\n for _, handler in config['handlers'].items():\n if 'filename' in handler:\n handler['filename'] = str(save_dir / handler['filename'])\n\n logging.config.dictConfig(config)\n else:\n print(\"Warning: logging configuration file is not found in {}.\".format(log_config), file=sys.stderr)\n logging.basicConfig(level=default_level)", "def initialize_logging(self):\n logging_config_path = self.pyleus_config.get('logging_config_path')\n if logging_config_path:\n logging.config.fileConfig(logging_config_path)\n elif os.path.isfile(DEFAULT_LOGGING_CONFIG_PATH):\n logging.config.fileConfig(DEFAULT_LOGGING_CONFIG_PATH)", "def _configure_logging(self):\n pass", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def start_logging(self):\n text = _DEFAULT_LOG_CONFIG\n path = self.bindings.get('LOG_CONFIG', None)\n if path:\n try:\n with open(path, 'r') as f:\n text = f.read()\n except Exception as ex:\n print 'ERROR reading LOGGING_CONFIG from {0}: {1}'.format(path, ex)\n raise\n config = ast.literal_eval(args_util.replace(text, self.bindings))\n logging.config.dictConfig(config)\n log_path = os.path.join(\n self.bindings['LOG_DIR'], self.bindings['LOG_FILEBASE'] + '.log')\n os.chmod(log_path, 0600)\n\n self.__journal = global_journal.get_global_journal()\n if self.__journal is None:\n # force start\n journal_path = os.path.join(\n self.bindings['LOG_DIR'],\n self.bindings['LOG_FILEBASE'] + '.journal')\n self.__journal = global_journal.new_global_journal_with_path(journal_path)", "def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": 
\"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)", "def setup_root_logger(loglevel=logging.DEBUG, logdir=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Logs'),\n log_config_file=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Utils', 'cent_logger.json')):\n try:\n\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n if log_config_file is not None and os.path.exists(log_config_file):\n with open(log_config_file, 'rt') as logconf:\n config = json.load(logconf)\n # create absolute path for logfile\n config['handlers']['file_handler']['filename'] = logdir + '/' + config['handlers']['file_handler']['filename']\n config['handlers']['longterm']['filename'] = logdir + '/' + config['handlers']['longterm']['filename']\n config['handlers']['single_run']['filename'] = logdir + '/' + config['handlers']['single_run']['filename']\n root_logger = logging.getLogger(\"framework\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the framework logger\")\n root_logger.info(\"Configured basic root logger from: {}\".format(log_config_file))\n test_logger = logging.getLogger(\"tests\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the tests logger\")\n test_logger.info(\"Configured basic tests logger from: {}\".format(log_config_file))\n\n # disable logs from below external modules\n for disabled_module in config['disable_module_logs']:\n root_logger.debug('Disabled logging for module: {}'.format(disabled_module))\n logging.getLogger(disabled_module).disabled = True\n\n except Exception as e:\n print(\"Error configuring logger: {}\".format(e), file=sys.stderr)\n raise e#", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def configLogging():\n # define a basic logger to write to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='/tmp/execute_pomset.log',\n filemode='w')\n\n # end def configureLogging\n pass", "def init():\n global logger\n\n with open(\"/app/log.json\", \"r\") as fd:\n logging.config.dictConfig(json.load(fd))\n\n logger = logging.getLogger()", "def setup_logging(\n default_conf_path='logging.json', \n default_level=logging.INFO,\n env_key='LOG_CFG',\n logging_path=None\n):\n path_found = False\n path = default_conf_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n print('Found logging configuration file at ' + default_conf_path + '\\n')\n with open(path, 'rt') as f:\n config = json.load(f)\n\n if logging_path and 'handlers' in config:\n logging_path = os.path.abspath(logging_path)\n print('Writing log at ' + logging_path + '\\n')\n mkdir_p(os.path.abspath(os.path.dirname(logging_path)))\n for key, value in config['handlers'].iteritems():\n if 'filename' in value:\n value['filename'] = logging_path\n path_found = True\n\n logging.config.dictConfig(config)\n else:\n print('Could not find logging configuration at '+ default_conf_path + '\\n')\n print('Using default logging option on console' + 
'\\n')\n logging.basicConfig(level=default_level)\n\n logging.captureWarnings(capture=True)\n return path_found", "def pytest_logger_logsdir(self, config):", "def init_config() -> None:\n config_file = importlib.resources.files(\"houdini_toolbox.logging\").joinpath(\n \"config.json\"\n )\n\n with config_file.open(encoding=\"UTF-8\") as handle:\n config = json.load(handle)\n logging.config.dictConfig(config)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def configure_logging():\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n logging.root.setLevel(logging.WARNING)", "def test_logging_config(self):\n topdir = os.path.dirname(os.path.dirname(__file__))\n # logging config from default\n os.system('rm %s/logging.conf' % topdir)\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)\n # logging config from file\n os.system('cp %s/logging.conf.sample %s/logging.conf' %\n (topdir, topdir))\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)", "def _initialize_log_file(config):\n for settings in config[\"handlers\"].values():\n if _is_file_handler(settings):\n log_path = Path(settings[\"filename\"])\n log_path.parent.mkdir(parents=True, exist_ok=True)\n log_path.touch(exist_ok=True)", "def _setup_logging(log_config: Path = LOG_CONFIG_FILE, silent: bool = False) -> None:\n\n if not log_config.is_file():\n raise RuntimeError(\n \"Logging file {log_file} not found\".format(log_file=log_config)\n )\n\n with log_config.open() as log_file:\n config_orig = yaml.safe_load(log_file.read()) # type: Any\n\n def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Prepend `LOGS_DIR` to all 'filename' attributes listed for handlers in logging.yaml\n :param config: Configuration dictionary\n :return: Configuration with 'filename's prepended with LOGS_DIR\n \"\"\"\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = str(LOGS_DIR.joinpath(filename))\n return config\n\n config = prepare_filenames(config_orig)\n # for some reason, pyright fails with \"'config' is not a known member of module\"\n # even though this is an officially documented member of logging\n # for now we ignore the type\n logging.config.dictConfig(config) # type: ignore\n if silent:\n _remove_non_file_handlers()", "def setup_logging():\n if not app.debug:\n if app.config.get('LOG_CFG'):\n # initialize the Flask logger (removes all 
handlers)\n _ = app.logger\n dictConfig(app.config.get('LOG_CFG'))\n else:\n # capability with previous config settings\n # Should have LOG_FILE and LOG_LEVEL set\n if app.config.get('LOG_FILE') is not None:\n handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)\n else:\n handler = StreamHandler(stream=sys.stderr)\n\n handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(app.config.get('LOG_LEVEL', DEBUG))\n app.logger.addHandler(handler)", "def setup_logging(\n module,\n default_level=logging.INFO,\n env_key='LOG_CFG',\n logpath=os.getcwd(),\n config_path=None\n):\n\n if not os.path.exists(os.path.dirname(logpath)):\n os.makedirs(os.path.dirname(logpath))\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")\n fpath = os.path.join(logpath, module, timestamp)\n\n path = config_path if config_path is not None else os.getenv(env_key, None)\n if path is not None and os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n for h in config['handlers'].values():\n if h['class'] == 'logging.FileHandler':\n h['filename'] = os.path.join(logpath, module, timestamp, h['filename'])\n touch(h['filename'])\n for f in config['filters'].values():\n if '()' in f:\n f['()'] = globals()[f['()']]\n logging.config.dictConfig(config)\n else:\n lpath=os.path.join(logpath, timestamp)\n if not os.path.exists(lpath):\n os.makedirs(lpath)\n logging.basicConfig(level=default_level, filename=os.path.join(lpath,\"base.log\"))", "def configure_logging(logdir=None):\n logconfig = LOGCONFIG_DICT.copy()\n if logdir:\n debugfile = os.path.join(logdir, DEBUGFILE)\n logconfig['handlers']['debugfile']['filename'] = debugfile\n errorfile = os.path.join(logdir, ERRORFILE)\n logconfig['handlers']['errorfile']['filename'] = errorfile\n\n logging.config.dictConfig(logconfig)", "def test_logging_config_file(self, monkeypatch):\n # We still want the Formatter to be configured.\n assert logging.Formatter.converter == time.gmtime\n assert logging.Formatter.default_time_format == '%Y-%m-%dT%H:%M:%S'\n assert logging.Formatter.default_msec_format == '%s.%03d'\n\n # Set NETDUMPLINGS_LOGGING_CONFIG to point to a test logging config.\n logging_config_file = 'tests/data/logging.json'\n monkeypatch.setenv('NETDUMPLINGS_LOGGING_CONFIG', logging_config_file)\n\n configure_logging()\n\n # The test config file sets all the loggers to ERROR.\n assert logging.getLogger('netdumplings').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplinghub').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingkitchen').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingeater').level == logging.ERROR", "def _configure_logging(config):\n # Initialize exception logging to Sentry with client DSN URL from SENTRY_DSN envvar;\n # does nothing if SENTRY_DSN does not exist, is empty, or is not recognized by Sentry\n sentry_sdk.init()\n if \"publisher\" in config[\"logging\"]:\n # Publish log messages to distributed logging aggregator\n logging_config = config[\"logging\"][\"publisher\"]\n logging_config[\"handlers\"][\"zmq_pub\"][\"context\"] = context\n host = config[\"zmq\"][\"host\"]\n port = config[\"zmq\"][\"ports\"][\"logging\"][NAME]\n addr = f\"tcp://*:{port}\"\n logging_config[\"handlers\"][\"zmq_pub\"][\"interface_or_socket\"] = addr\n logging.config.dictConfig(logging_config)\n for handler in logger.root.handlers:\n if 
isinstance(handler, zmq.log.handlers.PUBHandler):\n handler.root_topic = NAME\n handler.formatters = {\n logging.DEBUG: logging.Formatter(\"%(message)s\\n\"),\n logging.INFO: logging.Formatter(\"%(message)s\\n\"),\n logging.WARNING: logging.Formatter(\"%(message)s\\n\"),\n logging.ERROR: logging.Formatter(\"%(message)s\\n\"),\n logging.CRITICAL: logging.Formatter(\"%(message)s\\n\"),\n }\n # Not sure why, but we need a brief pause before we start logging\n # messages\n time.sleep(0.25)\n msg = f\"publishing logging messages to {addr}\"\n else:\n # Write log messages to local file system\n #\n # Replace logging RotatingFileHandlers with WatchedFileHandlers so\n # that we notice when log files are rotated and switch to writing to\n # the new ones\n logging_config = config[\"logging\"]\n logging_handlers = logging_config[\"handlers\"]\n rotating_handler = \"logging.handlers.RotatingFileHandler\"\n watched_handler = \"logging.handlers.WatchedFileHandler\"\n for handler in logging_handlers:\n if logging_handlers[handler][\"class\"] == rotating_handler:\n logging_handlers[handler][\"class\"] = watched_handler\n del logging_handlers[handler][\"backupCount\"]\n logging.config.dictConfig(logging_config)\n msg = \"writing logging messages to local file system\"\n return msg", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def configure_logging():\n configuration = get_configuration()\n logging.basicConfig(**configuration.get('logging', {}))\n\n logging.debug('Logging configured.')", "def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def setup_logger(config):\n filename = config[\"LOGGER_FILE\"]\n log_dir = '/'.join(filename.split('/')[0:-1]) + \"/\"\n\n check_and_create_directory(log_dir)\n\n level = 
config[\"LOGGER_LOGLEVEL\"].upper()\n filemode = 'a'\n _format = '%(asctime)s %(name)8s %(module)15s %(funcName)12s %(' \\\n 'levelname)7s: %(message)s'\n _dateformat = '(%d.%m.%Y, %H:%M:%S)'\n\n logging.basicConfig(filename=filename, filemode=filemode, level=level,\n format=_format, datefmt=_dateformat)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"werkzeug\").setLevel(logging.WARNING)\n\n # Display log simultaneously on console\n if config[\"CONSOLE_LOGGING\"]:\n add_terminal_logging(_format, level)", "def setup_logging_with_config(config: DynaBox):\n global logger\n logger = setup_logging_threatbus(config, logger_name)" ]
[ "0.82155377", "0.75840616", "0.73912275", "0.7389693", "0.72844446", "0.7030196", "0.70190215", "0.7017492", "0.7016571", "0.6946439", "0.69230664", "0.68668836", "0.6850633", "0.6848561", "0.6812513", "0.6809024", "0.6786257", "0.6777338", "0.6761215", "0.6757546", "0.67443126", "0.6727407", "0.67229307", "0.671431", "0.6709877", "0.670922", "0.6704961", "0.6687837", "0.667207", "0.66378266" ]
0.81726515
1
Endpoint to display create item page.
def create_item_page(): catagories = [c.name for c in Catagory.fetch_all()] return render_template('add_item.html', catagories=catagories, values={})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def create_item():\n name = request.form['name']\n catagory = request.form['catagory']\n description = request.form['description']\n errors = form_errors(request.form)\n if errors:\n catagories = [c.name for c in Catagory.fetch_all()]\n values = {\n 'name': name, 'catagory': catagory, 'description': description\n }\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n Item.create(name, catagory_name=catagory, description=description)\n return redirect(url_for(\n 'read_item', catagory_name=catagory, item_name=name\n ))", "def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def insert_item_page(request):\n validate(instance=request.body, schema=item_schema)\n body = json.loads(request.body)\n item = Item.new_item(body['cart_id'], body['food_id'], body['count'])\n return JsonResponse(model_to_json(item))", "def create_item(self, user: User, **kwargs) -> None:", "def newItem():\n if request.method == 'POST':\n if not checkLogin():\n return requests(url_for('catelog'))\n\n if request.form['name'].strip() == '':\n flash('item create failed: name is empty!')\n return redirect(url_for('newItem'))\n\n category = session.query(\n Category).filter_by(\n name=request.form['category']).one()\n\n ifCategory = session.query(Category).filter_by(\n name=request.form['category']).one()\n ifItem = session.query(Item).filter_by(\n category_id=ifCategory.id,\n name=request.form['name']).all()\n if (len(ifItem) > 0):\n flash('item create failed: item(%s) \\\n is already exist in category(%s)' % (\n ifItem[0].name,\n ifCategory.name))\n return 
redirect(url_for('catelog'))\n\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n category=category,\n auth=getLoginUser(),\n time=getIntTime())\n session.add(newItem)\n session.commit()\n\n flash('new item created: %s' % newItem.name)\n\n return redirect(url_for(\n 'itemDetail',\n category_name=category.name,\n item_name=newItem.name))\n else:\n all_category = session.query(Category).all()\n return render_template(\n 'new-item.html',\n all_category=all_category,\n isLogin=checkLogin())", "def createItem(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n if request.method == 'POST':\r\n session = DBSession()\r\n item = Item(name=request.form['name'],\r\n description=request.form['description'],\r\n category_id=category_id,\r\n user_id=login_session['user_id'])\r\n session.add(item)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('newitem.html', category_id=category_id)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def 
add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')", "def issueCreate(request):\n args = { 'statusForm' : forms.itemStatusForm(), }\n return render_to_string('issueCreate.html', args,\n context_instance=RequestContext(request))", "def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))", "def create():\r\n form = ArticleForm(request.form)\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = uuid.uuid4().hex\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully added')\r\n return redirect(url_for('article.list'))\r\n\r\n return render_template('article/form.html', add_article=True,\r\n form=form, title='Add Article')", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def create(request):\n if request.method == \"POST\":\n form = InitialInvoice(data=request.POST)\n if form.is_valid():\n data = form.cleaned_data\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": ItemForm(),\n \"stage\": \"2\",\n \"initial_data\": data\n })\n\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": InitialInvoice(),\n \"stage\": \"1\"\n })", "def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass", "def goto_create(self):\n\n self.create.click()", "def new(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item', id_tipo_item=id_tipo_item)\n if not 
pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(atras)\n tmpl_context.widget = self.new_form\n return dict(value=kw, \n page=u\"Nuevo Atributo\", \n action=url_action, \n atras=url_action)", "def admincreate(object):\n if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def test_create_item(self):\n\n url = reverse('stock-item-create')\n\n response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(url, {'part': 999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from a valid item, valid location\n response = self.client.get(url, {'location': 1, 'copy': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from an invalid item, invalid location\n response = self.client.get(url, {'location': 999, 'copy': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def new():\n session = current_app.config['db']\n if request.method == \"POST\":\n new_name = request.form['itemname']\n try:\n item = WineABV(name=new_name)\n session.add(item)\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n item = WineABV(name=new_name)\n return 
render_template(template_prefix+'/new_form.html', item=item)\n\n flash(\"Successfully Added '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n item = WineABV(name=\"\")\n return render_template(template_prefix+'new_form.html', item=item)", "def go_to_create_tag():\n\n posts = Post.query.all()\n return render_template('tags/new.html', posts=posts)", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def add_items_handler():\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n category_id = rq['category_id']\n item = addItem(name, picture, description, category_id, g.user.id)\n return jsonify(item=item.serialize)" ]
[ "0.74668145", "0.71456575", "0.6886429", "0.6816857", "0.68146276", "0.6764137", "0.65232", "0.6477503", "0.6445127", "0.6436602", "0.6388984", "0.6381548", "0.634037", "0.6285156", "0.6273723", "0.624713", "0.62171143", "0.6188557", "0.6186302", "0.61858845", "0.61284363", "0.61182874", "0.61075354", "0.6101788", "0.6090157", "0.6083825", "0.6071801", "0.6023216", "0.59993005", "0.5965686" ]
0.7436075
1
Post endpoint to create an item. If form is invalid will return create item page with errors displayed, otherwise create item and redirect to item page.
def create_item(): name = request.form['name'] catagory = request.form['catagory'] description = request.form['description'] errors = form_errors(request.form) if errors: catagories = [c.name for c in Catagory.fetch_all()] values = { 'name': name, 'catagory': catagory, 'description': description } return render_template( 'add_item.html', catagories=catagories, values=values, errors=errors ) Item.create(name, catagory_name=catagory, description=description) return redirect(url_for( 'read_item', catagory_name=catagory, item_name=name ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)", "def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def createItem(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n if request.method == 'POST':\r\n session = DBSession()\r\n item = Item(name=request.form['name'],\r\n description=request.form['description'],\r\n category_id=category_id,\r\n user_id=login_session['user_id'])\r\n session.add(item)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('newitem.html', category_id=category_id)", "def newItem():\n if request.method == 'POST':\n if not checkLogin():\n return requests(url_for('catelog'))\n\n if request.form['name'].strip() == '':\n flash('item create failed: name is empty!')\n return redirect(url_for('newItem'))\n\n category = session.query(\n Category).filter_by(\n name=request.form['category']).one()\n\n 
ifCategory = session.query(Category).filter_by(\n name=request.form['category']).one()\n ifItem = session.query(Item).filter_by(\n category_id=ifCategory.id,\n name=request.form['name']).all()\n if (len(ifItem) > 0):\n flash('item create failed: item(%s) \\\n is already exist in category(%s)' % (\n ifItem[0].name,\n ifCategory.name))\n return redirect(url_for('catelog'))\n\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n category=category,\n auth=getLoginUser(),\n time=getIntTime())\n session.add(newItem)\n session.commit()\n\n flash('new item created: %s' % newItem.name)\n\n return redirect(url_for(\n 'itemDetail',\n category_name=category.name,\n item_name=newItem.name))\n else:\n all_category = session.query(Category).all()\n return render_template(\n 'new-item.html',\n all_category=all_category,\n isLogin=checkLogin())", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def add_item():\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Get form fields\n name = request.form['name']\n url = request.form['url']\n photo_url = request.form['photo_url']\n description = request.form['description']\n category = request.form['item_category']\n # Retrieve the database ID of the selected category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Retrieve user's database ID for the item's database entry\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print('Database ID of category is {}.'.format(category_id.id))\n # Flash messages for incomplete item info\n if not request.form['name']:\n flash('Please add item name')\n return redirect(url_for('add_item'))\n if not request.form['url']:\n flash('Please add item URL')\n return redirect(url_for('add_item'))\n if not request.form['photo_url']:\n flash('Please add item photo URL')\n return redirect(url_for('add_item'))\n if not request.form['description']:\n flash('Please add a description')\n return redirect(url_for('add_item'))\n # Query database for item name\n item_name_in_db = (session.query(Items.name)\n .filter_by(name=name)\n .all())\n # If the item name is already in the database, don't add\n if item_name_in_db:\n print('Item name \"{}\" already in database.'.format(name))\n flash('Item name \"{}\" already in database.'.format(name))\n return redirect(url_for('add_item'))\n # Create object with form field info to add to database\n new_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n session.add(new_item)\n session.commit()\n 
print('Item \"{}\" created.'.format(new_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('add_item.html',\n categories=categories,\n login_status=login_status)", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def insert_item_page(request):\n validate(instance=request.body, schema=item_schema)\n body = json.loads(request.body)\n item = Item.new_item(body['cart_id'], body['food_id'], body['count'])\n return JsonResponse(model_to_json(item))", "def add_item(request):\n \n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, 'New item added successfully!')\n return redirect(reverse('add_item'))\n else:\n messages.error(request, 'Failed to add item. Please check the form.')\n else:\n form = ProductForm()\n \n template = 'products/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def add_item(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n new_item = form.save()\n messages.success(request, 'Your product was added to the '\n 'store successfully.')\n return redirect(reverse('item_info', args=[new_item.id]))\n else:\n messages.error(request, 'There was an issue adding the '\n 'product. 
Please ensure the form is valid.')\n else:\n form = ProductForm()\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def new():\n session = current_app.config['db']\n if request.method == \"POST\":\n new_name = request.form['itemname']\n try:\n item = WineABV(name=new_name)\n session.add(item)\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n item = WineABV(name=new_name)\n return render_template(template_prefix+'/new_form.html', item=item)\n\n flash(\"Successfully Added '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n item = WineABV(name=\"\")\n return render_template(template_prefix+'new_form.html', item=item)", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)", "def post():\n\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n return redirect(url_for('index'))\n return render_template('new.html', form=form)", "def post(self):\n try:\n new_form = FORM_SCHEMA.load(request.json).data\n except ValidationError as err:\n APP.logger.error(err.args)\n return err.messages, status.HTTP_400_BAD_REQUEST\n\n add_new_form = Form(**new_form)\n DB.session.add(add_new_form)\n\n try:\n DB.session.commit()\n except IntegrityError as err:\n APP.logger.error(err.args)\n DB.session.rollback()\n return {'error': 'Already exists.'}, status.HTTP_400_BAD_REQUEST\n return Response(status=status.HTTP_201_CREATED)", "def post(self, item):\n\n db.session.add(item)\n\n return item", "def addItem(category_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to add 
item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n if request.method == 'POST':\r\n # create operation\r\n name = request.form['name']\r\n description = request.form['description']\r\n if not name:\r\n flash('Add ItemError: Name can\\'t be empty')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n newItem = Item(name=name, description=description,\r\n category_id=category_id, user_id=category.user_id)\r\n session.add(newItem)\r\n session.commit()\r\n flash('Added Item \\'{}\\' Successfully!'.format(newItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with the form\r\n return render_template(\"addItem.html\", category=category)", "def create():\r\n form = ArticleForm(request.form)\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = uuid.uuid4().hex\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully added')\r\n return redirect(url_for('article.list'))\r\n\r\n return render_template('article/form.html', add_article=True,\r\n form=form, title='Add Article')", "def create_item(self, user: User, **kwargs) -> None:", "def add():\n if request.method == \"POST\":\n result = add_post(\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n return render_template(\"add.html\")", "def add_item(request, shoppinglist_id, category_id=False, product_id=False):\n if request.method == 'POST':\n form = ItemForm(request.POST)\n if form.is_valid():\n shoppinglist = get_object_or_404(\n Shoppinglist,\n pk=shoppinglist_id,\n pantry__owner=request.user\n )\n product = get_object_or_404(Product, pk=product_id)\n try:\n item = Item.objects.get(shoppinglist=shoppinglist,\n product=product)\n item.amount += form.cleaned_data['amount']\n except ObjectDoesNotExist:\n item = Item(shoppinglist=shoppinglist,\n product=product,\n amount=form.cleaned_data['amount'],\n bought=False)\n item.save()\n return redirect('shoppinglists.views.detail', shoppinglist_id)\n\n response_dict = {'shoppinglist_id': shoppinglist_id,\n 'categories': Category.objects.all(),\n 'logged': False}\n if category_id:\n response_dict.update(\n {'category_id': category_id,\n 'category': Category.objects.get(pk=category_id),\n 'products': Product.objects.filter(categories__pk=category_id)}\n )\n if product_id:\n response_dict.update(\n {'form': ItemForm(),\n 'product': Product.objects.get(pk=product_id),\n 'product_id': product_id}\n )\n return render_to_response('shoppinglists/item_form.html',\n response_dict,\n context_instance=RequestContext(request))", "def save_item(item, item_id):\n # User is modifying an EXISTING item in the database\n if item_id > 0:\n item.Item.name = request.form['title']\n item.Item.description = request.form['description']\n item.Item.category_id = request.form['category']\n session.add(item.Item)\n session.commit()\n flash(\"Updated \" + item.Item.name)\n return render_template('item_details.html', item=item, 
login_session=login_session)\n\n # User is creating a NEW item\n else:\n new_item = Item(name=request.form.get('title'), description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['userid'])\n session.add(new_item)\n session.commit()\n flash(\"Created \" + new_item.name)\n created_item = session.query(Item, User).filter(Item.id == new_item.id).join(User).first()\n return render_template('item_details.html', item=created_item, login_session=login_session)", "def add_view(self, request):\r\n instance_form = self.get_minimal_add_form()\r\n form = instance_form(request.POST, request.FILES, prefix=self.base_url())\r\n\r\n new_instance = None\r\n if form.is_valid():\r\n new_instance = form.save()\r\n template = select_template(self.item_add_template)\r\n context = RequestContext(request)\r\n context.update({\r\n \"insert\": self,\r\n \"form\": form,\r\n \"object\": new_instance\r\n })\r\n response = HttpResponse(template.render(context))\r\n response.status_code = 201\r\n return response\r\n response = HttpResponse(form.errors)\r\n response.status_code = 400\r\n return response", "def newMenuItemPage(restaurant_id):\n restaurant = db_methods.searchResByID(restaurant_id)\n res_id = restaurant_id\n user_id = login_session['user_id']\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.addNewMenuItem(user_id, item_name, item_price, \n item_desc, item_course, res_id)\n time.sleep(0.1)\n return redirect(\"/restaurants/%s/menu/\" % res_id)\n else:\n error = \"Please be sure to fill out all required fields.\"\n return render_template('newmenuitem.html', error = error)\n else:\n return render_template('newmenuitem.html', res_id = res_id)", "def restaurantMenuItemNew(restaurant_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n if request.form['name']:\n newItem = MenuItem(name=request.form['name'], description=request.form[\n 'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n\n flash('Menu Item Created', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemNew.html', restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "def test_add_item_using_post(self):\n pass", "def new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(pub_date=datetime.date.today())\n post.title = form.title.data\n post.content = form.content.data\n post.slug = slugify(post.title)\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('new.html', form=form)" ]
[ "0.7512342", "0.74192584", "0.7168803", "0.7162308", "0.6992464", "0.68776226", "0.6828334", "0.67595434", "0.6719401", "0.67075944", "0.66212463", "0.66060036", "0.65833145", "0.65363026", "0.6523961", "0.6484986", "0.6414084", "0.6341097", "0.6312691", "0.6282003", "0.62772465", "0.6200028", "0.61978626", "0.61482304", "0.60983765", "0.6088804", "0.6073651", "0.60616446", "0.6033742", "0.6027761" ]
0.7602898
0
Endpoint to display update item page.
def update_item_page(item_name, catagory_name): item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) catagories = [c.name for c in Catagory.fetch_all()] return render_template( 'edit_item.html', catagories=catagories, values={ 'name': item.name, 'catagory': item.catagory_name, 'description': item.description }, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n item_to_update.fk_category = Category.objects.get(name=request.POST['cat'])\n item_to_update.fk_subcategory = SubCategory.objects.get(name=request.POST['subcat'])\n item_to_update.notes = request.POST['notes']\n item_to_update.save()\n return HttpResponse(status=200)", "def update_item(item_id):\n edited_item = session.query(Item).filter_by(id=item_id).one()\n\n # redirect to details page if current user does not own item\n if edited_item.user_id != login_session['user_id']:\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n\n if request.method == 'POST':\n if request.form['category']:\n edited_item.category_id = request.form['category']\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n edited_item.updated_date = datetime.datetime.now()\n session.add(edited_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n else:\n categories = session.query(Category).all()\n return render_template(\n 'views/edit.html',\n edited_item=edited_item,\n categories=categories)", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def update():\n return 'update api in put'", "def item_edit(context, request, render=None):\n if render is None:\n render = request.params.get('render', True)\n properties = request.validated\n # This *sets* the property sheet\n request.registry.notify(BeforeModified(context, request))\n context.update(properties)\n request.registry.notify(AfterModified(context, request))\n if render == 'uuid':\n item_uri = '/%s' % context.uuid\n else:\n item_uri = request.resource_path(context)\n if asbool(render) is True:\n rendered = embed(request, item_uri + '?embed=false')\n else:\n rendered = item_uri\n request.response.status = 200\n result = {\n 'status': 'success',\n '@type': ['result'],\n '@graph': [rendered],\n }\n return result", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def edit_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to edit the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = ItemForm()\n\n # If the form is validated, update the item with its data to the\n # database\n if form.validate_on_submit():\n\n # If the item name or sport has been modified, check that an\n # item with the same name and sport does not already exist, or\n # send a flash message and do not add the new item to the\n # database\n if form.name.data != item.name or form.sport.data != item.sport:\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n return redirect(url_for('items.edit_item',\n item_name=item_name))\n\n # If the item name or sport has not been modified, update all\n # details to the database, send a flash message, and redirect\n # to 'home'\n 
else:\n item.name = form.name.data\n item.sport = form.sport.data\n item.category = form.category.data\n item.description = form.description.data\n item.private = form.private.data\n db.session.commit()\n flash(f'\"{item.name}\" has been updated!', 'good')\n return redirect(url_for('items.item', item_name=item_name))\n\n # If the form is being requested, not submitted, pre-fill the form\n # with existing item data\n elif request.method == 'GET':\n form.name.data = item.name\n form.sport.data = item.sport\n form.category.data = item.category\n form.description.data = item.description\n form.private.data = item.private\n\n return render_template('edit_item.html', item=item, form=form)", "def edit_item(request, item_id):\n if request.user.is_superuser:\n item = get_object_or_404(Product, pk=item_id)\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES, instance=item)\n if form.is_valid():\n form.save()\n messages.success(request, 'Item was successfully updated.')\n return redirect(reverse('item_info', args=[item.id]))\n else:\n messages.error(request, 'There was an issue updating the '\n 'item. Please make sure the form is valid.')\n else:\n form = ProductForm(instance=item)\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/edit_item.html'\n context = {\n 'form': form,\n 'item': item,\n }\n\n return render(request, template, context)", "def edit(item_id):\n session = current_app.config['db']\n item = session.query(WineABV).filter_by(id=item_id).one()\n if request.method == \"POST\":\n new_name = request.form['itemname']\n item.name = new_name\n try:\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n return render_template('edit_form.html', item=item)\n\n flash(\"Successfully Edited '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n return render_template(template_prefix+'edit_form.html', item=item)", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def edit_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('edit_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n item.name = request.form['name']\n item.category_id = request.form['category']\n item.description = request.form['description']\n sqlsession.commit()\n return redirect(url_for('view_item', item_id=item_id))\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n categories = sqlsession.query(Category).all()\n return render_template(\"edit_item.html\",\n item=item,\n categories=categories)", "def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})", "def edit_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find a item with that id\", category='warning')\n return redirect(request.referrer)\n\n form = ItemForm()\n form.editting_item_id = item_id\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n item.category_id = form.category_id.data.id\n item.name = form.name.data.capitalize()\n item.description = form.description.data\n db.session.commit()\n 
flash('Successfully updated Item', 'success')\n return redirect(url_for('url.index'))\n\n elif request.method == 'GET':\n form.name.data = item.name\n form.description.data = item.description\n\n return render_template(\n 'forms/form.html',\n form_title='Edit Item',\n form=form,\n form_name='item',\n action=url_for('url.edit_item', item_id=item_id))", "def issueUpdateView(context, issue):\n\n user = context.get('user')\n\n if not user.has_perm('IssueTracker.can_change'):\n return \"\"\n\n if issue.item:\n item = issue.item.item\n \n args = {\n \"form\": forms.UpdateMachineForm(instance=item),\n }\n\n return render_to_string('issueUpdate.html', args, context)\n\n return \"\"", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def editItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n editedItem.user_id = login_session['user_id']\n session.add(editedItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('edititem.html', sport_id=sport_id,\n item_id=item_id, sport=sport, item=editedItem)", "def update(request):\n return 0", "def updateItem(self, object):\n pass", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', categories=db.getAllCategories(), item=editedItem)", "def update(_id): \n pages_object = Pages(_id)\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Update page\n if request.method == 'POST':\n if pages_object.update():\n return redirect(url_for('pages.overview'))\n \n len_of_label = len(page['label'])\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/update.html'.format(MODULE_DIR), **locals())", "def edit_item(item_id):\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = session.query(Item).filter_by(id=item_id).one()\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n session.add(item)\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})", "def update_item(self, table, item):", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def edit_items(request):\n token = getToken(request)\n superUser = isSuperUser(token)\n if superUser == True:\n id = request.data['id']\n try:\n items = Items.objects.get(id=id)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ItemsSerializer(items, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})" ]
[ "0.6676208", "0.64609563", "0.64174616", "0.63904625", "0.63857603", "0.6339873", "0.6314984", "0.62889534", "0.6269479", "0.6255692", "0.6253654", "0.624584", "0.62428105", "0.6238067", "0.62355477", "0.6230332", "0.62169313", "0.6211939", "0.62091535", "0.61984015", "0.618539", "0.6185296", "0.6174242", "0.6163913", "0.61522734", "0.61456543", "0.6123437", "0.6117977", "0.6110631", "0.6098802" ]
0.675567
0
Return dict containing form validation errors for create / update item.
def form_errors(form):
    errors = {}
    max_name_length = Item.name.property.columns[0].type.length
    if not form.get('name', None):
        errors['name'] = 'Please enter a name.'
    elif len(form['name']) > max_name_length:
        errors['name'] = (
            'Name must be less than %s characters.' % max_name_length
        )
    if not Catagory.exists(form.get('catagory', None)):
        errors['catagory'] = 'Not a valid catagory.'
    if not form.get('description', None):
        errors['description'] = 'Please enter a description.'
    return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_form_error(self):\n errors = {}\n if self._form_error:\n errors[\"base\"] = self._form_error\n self._form_error = None\n return errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def render_errors(form):\n return {\n \"form\": form\n }", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def form_invalid_add_global_errormessages(self, form):\n if self.get_selected_items_form_attribute() in form.errors:\n errormessages = form.errors[self.get_selected_items_form_attribute()]\n for errormessage in errormessages:\n messages.error(self.request, errormessage)", "def _generate_for_errors_object_when_updating(user_request):\n err_dict = {}\n for field in ['location', 'destination']:\n if field in user_request:\n err_dict[field] = \\\n [serialization_errors['cannot_update_flight_field_with_bookings'].format(field)]\n\n return err_dict", "def describe_invalid_form(form):\n return dict((i.name, i.note) for i in form.inputs if i.note is not None)", "def _validate_error(cls, item):\n if item.error and item.status_code not in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error: %s for job is not empty but '\n 'job status is %s' % (item.id, item.error, item.status_code))\n\n if not item.error and item.status_code in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error for job is empty but '\n 'job status is %s' % (item.id, item.status_code))", "def get_field_errors(self, field):\r\n identifier = format_html('{0}.{1}', self.form_name, field.name)\r\n return self.error_class([SafeTuple((identifier, '$pristine', '$pristine', 'invalid', e))\r\n for e in self.errors.get(field.name, [])])", "def field_errors(bound_field):\n seen = []\n errors = {}\n if hasattr(bound_field.field, \"fields\"):\n for idx, subfield in enumerate(bound_field.field.fields):\n key = \"%s_%d\" % (bound_field.auto_id, idx)\n subfield_errors = getattr(subfield.widget, \"errors\", [])\n errors[key] = subfield_errors\n seen.extend(subfield_errors)\n for error in bound_field.errors:\n if error not in seen:\n errors.setdefault(bound_field.auto_id, [])\n errors[bound_field.auto_id].append(error)\n return errors.items()", "def validate():\n if request.method != 'POST':\n abort(400)\n\n is_update = True if request.args.get('is_update') == 'True' else False\n data = request.json or MultiDict({})\n formdata = MultiDict(data or {})\n form = AuthorUpdateForm(formdata=formdata, is_update=is_update)\n form.validate()\n\n result = {}\n changed_msgs = dict(\n (name, messages) for name, messages in form.messages.items()\n if name in formdata.keys()\n )\n result['messages'] = changed_msgs\n\n return jsonify(result)", "def form_invalid(self, form, request):\n if request.is_ajax():\n errors_dict = {}\n if form.errors:\n for error in form.errors:\n e = form.errors[error]\n errors_dict[error] = unicode(e)\n return HttpResponseBadRequest(json.dumps(errors_dict))\n else:\n return self.render_to_response(self.get_context_data(form=form))", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def failure(self, validation_failure):\n \n 
self.request.response.status_int = 400\n return validation_failure.error.asdict()", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def get_field_errors(self, bound_field):\r\n errors = super(NgFormValidationMixin, self).get_field_errors(bound_field)\r\n identifier = format_html('{0}.{1}', self.form_name, self.add_prefix(bound_field.name))\r\n errors_function = '{0}_angular_errors'.format(bound_field.field.__class__.__name__)\r\n try:\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, errors_function)\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n except (TypeError, AttributeError):\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, 'Default_angular_errors')\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n errors.append(SafeTuple((identifier, '$dirty', '$valid', 'valid', ''))) # for valid fields\r\n errors.extend([SafeTuple((identifier, '$dirty', pe[0], 'invalid', force_text(pe[1])))\r\n for pe in potential_errors])\r\n return errors", "def validation_errors(self):\n return self._validation_errors", "def validate(self):\n\n form = CallEventForm(self.data)\n if not form.is_valid():\n self.errors = form.errors\n map_dict_fields(self.errors, const.DB_FIELDS, const.API_FIELDS)", "def v_err(flaw):\n error_messages = {\n 'no_season': _(\n \"Season must contain at least 4 alphanumeric characters.\"\n ),\n 'no_items': _(\n \"Menu must contain at least 1 item.\"\n ),\n 'no_name': _(\n \"Name field must contain at least 4 alphanumeric characters.\"\n ),\n 'no_desc': _(\n \"Description must contain at least 10 characters.\"\n ),\n 'no_chef': _(\n \"Item must belong to a chef.\"\n ),\n 'no_ing': _(\n \"Item must contain at least 1 ingredient.\"\n ),\n 'elapsed': _(\n \"This date has elapsed.\"\n )\n }\n raise forms.ValidationError(\n error_messages[flaw],\n code=flaw,\n )", "def _post_clean(self):\r\n super(NgModelFormMixin, self)._post_clean()\r\n if self._errors and self.prefix:\r\n self._errors = ErrorDict((self.add_prefix(name), value) for name, value in self._errors.items())", "def request_validation_error(error):\n message = str(error)\n app.logger.error(message)\n return {\n 'status_code': status.HTTP_400_BAD_REQUEST,\n 'error': 'Bad Request',\n 'message': message\n }, status.HTTP_400_BAD_REQUEST", "def validate(self):\n errors = {}\n for typ, items in self._items.iteritems():\n for name, spec in items.iteritems():\n assert hasattr(spec, 'validate'), 'Does %s:%s descend from FrodoBase?' 
% (name, spec)\n spec_errors = spec.validate()\n if spec_errors:\n errors[name] = spec_errors\n return errors\n\n # sys.modules[__name__] = Configuration()", "def format_error(invalid, doc_type):\n # using string for checking is probably not ideal,\n # but voluptuous does not have specific sub error\n # types for these errors\n if invalid.error_message == 'extra keys not allowed':\n msg = \"Key '{}' is not allowed\".format(invalid.path[0])\n elif invalid.error_message == 'required key not provided':\n msg = \"{} '{}' is missing\".format(doc_type, invalid.path[0])\n else:\n msg = invalid.message\n return {'message': msg, 'field': str(invalid.path[0])}", "def test_form_errors(self):\n form = self.response.context.get('form')\n self.assertTrue(form.errors)", "def handle_validation_error(self, error, bundle_errors):\n \n error_str = six.text_type(error)\n error_msg = self.help.format(error_msg=error_str) if self.help else error_str\n msg = {self.name: error_msg}\n\n if bundle_errors:\n return error, msg\n flask_restful.abort(400, message=msg)", "def form_invalid(self, form, request):\n return", "def form_invalid(self, form, request):\n return", "def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors", "def security_errors(self):\n errors = ErrorDict()\n for f in [\"honeypot\", \"timestamp\", \"security_hash\"]:\n if f in self.errors:\n errors[f] = self.errors[f]\n return errors", "def validate_form(form, collection):\r\n\r\n # variable initialization\r\n max_title = 50\r\n max_ingredients = 500\r\n max_method = 1500\r\n max_recipe_URL = 250\r\n max_servings = 100\r\n max_category_name = 50\r\n max_category_URL = 250\r\n max_review = 250\r\n error_list = []\r\n\r\n # validates recipe form\r\n if collection == 'recipe':\r\n if not form['title'] or len(form['title']) > max_title:\r\n error_list.append(\r\n 'Title must not be empty or more than {} characters!'\r\n .format(max_title)\r\n )\r\n\r\n ingredient = form['ingredients']\r\n if not ingredient or len(ingredient) > max_ingredients:\r\n error_list.append(\r\n 'Ingredients must not be empty or more than {} characters!'\r\n .format(max_ingredients)\r\n )\r\n\r\n if not form['method'] or len(form['method']) > max_method:\r\n error_list.append(\r\n 'Method must not be empty or more than {} characters!'\r\n .format(max_method)\r\n )\r\n\r\n if 'appliance_categories' not in form:\r\n error_list.append(\r\n 'At least one of the appliances should be checked!'\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_recipe_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!!'\r\n .format(max_recipe_URL)\r\n )\r\n\r\n try:\r\n if not form['servings'] or int(form['servings']) > max_servings:\r\n error_list.append(\r\n 'Servings must not be empty or more than {}!'\r\n .format(max_servings)\r\n )\r\n\r\n except ValueError:\r\n error_list.append('Servings is not a number!')\r\n\r\n # validates recipe category form\r\n elif collection == 'recipe_category':\r\n if not form['name'] or len(form['name']) > max_category_name:\r\n error_list.append(\r\n 'Category name must not be empty or more than {} characters!'\r\n .format(max_category_name)\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_category_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!'\r\n .format(max_category_URL)\r\n )\r\n\r\n # validates review form\r\n elif collection == 'review':\r\n if not form['review'] or 
len(form['review']) > max_review:\r\n error_list.append(\r\n 'Review must not be empty or more than {} characters!'\r\n .format(max_review)\r\n )\r\n\r\n # returns errors on an empty list\r\n return error_list" ]
[ "0.7247966", "0.65767854", "0.65024495", "0.6369957", "0.62522954", "0.6153304", "0.6130591", "0.6103089", "0.6074817", "0.6054684", "0.5964537", "0.5951978", "0.594963", "0.59282666", "0.59004563", "0.58859175", "0.5877393", "0.5859856", "0.58356667", "0.5771657", "0.5703528", "0.5685815", "0.56731915", "0.56563056", "0.5656086", "0.5612645", "0.5612645", "0.5612308", "0.5612098", "0.56110543" ]
0.7545022
0
r"""Chooses a BoTorch `MarginalLogLikelihood` class using the given `Model` class.
def choose_mll_class(
    model_class: Type[Model],
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit: bool = True,
) -> Type[MarginalLogLikelihood]:
    # NOTE: We currently do not support `ModelListGP`. This code block will only
    # be relevant once we support `ModelListGP`.
    if (state_dict is None or refit) and issubclass(model_class, ModelListGP):
        return SumMarginalLogLikelihood
    return ExactMarginalLogLikelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, likelihood, model):\n if not isinstance(likelihood, GaussianLikelihood):\n raise RuntimeError(\"Likelihood must be Gaussian for exact inference\")\n super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)", "def from_botorch(\n cls,\n model: Model,\n mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,\n ) -> Surrogate:\n surrogate = cls(botorch_model_class=model.__class__, mll_class=mll_class)\n surrogate._model = model\n # Temporarily disallowing `update` for surrogates instantiated from\n # pre-made BoTorch `Model` instances to avoid reconstructing models\n # that were likely pre-constructed for a reason (e.g. if this setup\n # doesn't fully allow to constuct them).\n surrogate._constructed_manually = True\n return surrogate", "def set_model(self, likelihood_model_instance):\n pass", "def set_model(self, likelihood_model_instance):\n pass", "def define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=1., predict=False, prior_scale = 1.0, device = 'cpu'):\n\n fmodel = util.make_functional(model)\n dist_list = []\n for tau in tau_list:\n dist_list.append(torch.distributions.Normal(torch.zeros_like(tau), tau**-0.5))\n\n def log_prob_func(params):\n # model.zero_grad()\n # params is flat\n # Below we update the network weights to be params\n params_unflattened = util.unflatten(model, params)\n\n i_prev = 0\n l_prior = torch.zeros_like( params[0], requires_grad=True) # Set l2_reg to be on the same device as params\n for weights, index, shape, dist in zip(model.parameters(), params_flattened_list, params_shape_list, dist_list):\n # weights.data = params[i_prev:index+i_prev].reshape(shape)\n w = params[i_prev:index+i_prev]\n l_prior = dist.log_prob(w).sum() + l_prior\n i_prev += index\n\n # Sample prior if no data\n if x is None:\n # print('hi')\n return l_prior/prior_scale\n\n x_device = x.to(device)\n y_device = y.to(device)\n\n\n output = fmodel(x_device, params=params_unflattened)\n\n if model_loss == 'binary_class_linear_output':\n crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device))\n elif model_loss == 'multi_class_linear_output':\n # crit = nn.MSELoss(reduction='mean')\n crit = nn.CrossEntropyLoss(reduction='sum')\n # crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device.long().view(-1)))\n # ll = - tau_out *(torch.nn.functional.nll_loss(output, y.long().view(-1)))\n elif model_loss == 'multi_class_log_softmax_output':\n ll = - tau_out *(torch.nn.functional.nll_loss(output, y_device.long().view(-1)))\n\n elif model_loss == 'regression':\n # crit = nn.MSELoss(reduction='sum')\n ll = - 0.5 * tau_out * ((output - y_device) ** 2).sum(0)#sum(0)\n\n elif callable(model_loss):\n # Assume defined custom log-likelihood.\n ll = - model_loss(output, y_device).sum(0)\n else:\n raise NotImplementedError()\n\n if torch.cuda.is_available():\n del x_device, y_device\n torch.cuda.empty_cache()\n\n if predict:\n return (ll + l_prior/prior_scale), output\n else:\n return (ll + l_prior/prior_scale)\n\n return log_prob_func", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def make_mlp_likelihood(model=None, model_config=None, wiener_params=None, **kwargs):\n\n def 
random(\n self,\n keep_negative_responses=True,\n add_model=False,\n add_model_parameters=False,\n add_outliers=False,\n keep_subj_idx=False,\n ):\n \"\"\"\n Generate random samples from a given model (the dataset matches the size of the respective observated dataset supplied as an attribute of self).\n \"\"\"\n\n # This can be simplified so that we pass parameters directly to the simulator ...\n theta = np.array(model_config[\"params_default\"], dtype=np.float32)\n keys_tmp = self.parents.value.keys()\n cnt = 0\n\n for param in model_config[\"params\"]:\n if param in keys_tmp:\n theta[cnt] = np.array(self.parents.value[param]).astype(np.float32)\n cnt += 1\n\n sim_out = simulator(theta=theta, model=model, n_samples=self.shape[0], max_t=20)\n\n # Add outliers:\n if add_outliers:\n if self.parents.value[\"p_outlier\"] > 0.0:\n sim_out = hddm_dataset_generators._add_outliers(\n sim_out=sim_out,\n p_outlier=self.parents.value[\"p_outlier\"],\n max_rt_outlier=1 / wiener_params[\"w_outlier\"],\n )\n\n sim_out_proc = hddm_preprocess(\n sim_out,\n keep_negative_responses=keep_negative_responses,\n keep_subj_idx=keep_subj_idx,\n add_model_parameters=add_model_parameters,\n )\n\n if add_model:\n sim_out_proc[\"model\"] = model\n\n return sim_out_proc\n\n def pdf(self, x):\n # Check if model supplied has only two choice options\n # If yes --> check if two-dimensional input (rt, response) or one-dimensional input (rt) --> processing depends on it\n # If not --> input x has to be two dimensional (rt, response) becasuse we can't deduce response from rt\n x = np.array(x, dtype=np.float32)\n\n if len(x.shape) == 1 or x.shape[1] == 1:\n rt = x\n response = rt / np.abs(rt)\n rt = np.abs(rt)\n elif x.shape[1] == 2:\n rt = x[:, 0]\n response = x[:, 1]\n\n params = np.array(\n [self.parents[param] for param in model_config[\"params\"]]\n ).astype(np.float32)\n\n return hddm.wfpt.wiener_like_nn_mlp_pdf(\n rt,\n response,\n params,\n p_outlier=self.parents.value[\"p_outlier\"],\n w_outlier=wiener_params[\"w_outlier\"],\n network=kwargs[\"network\"],\n )\n\n def cdf(self, x):\n # TODO: Implement the CDF method for neural networks\n return \"Not yet implemented\"\n\n def make_likelihood():\n likelihood_str = make_likelihood_str_mlp(\n config=model_config, wiener_params=wiener_params\n )\n exec(likelihood_str)\n my_fun = locals()[\"custom_likelihood\"]\n return my_fun\n\n # TODO: Allow for rt's of -999 in LAN likelihoods\n def make_likelihood_missing_data():\n return\n\n likelihood_ = make_likelihood()\n\n wfpt_nn = stochastic_from_dist(\"Wienernn_\" + model, partial(likelihood_, **kwargs))\n\n wfpt_nn.pdf = pdf\n wfpt_nn.cdf_vec = None # AF TODO: Implement this for neural nets (not a big deal actually but not yet sure where this is ever used finally)\n wfpt_nn.cdf = cdf\n wfpt_nn.random = random\n return wfpt_nn", "def __init__(self, model):\n TreeLikelihoodBase.__init__(self, model)", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have 
not been optimized; training now')\n self.train()\n if self.BICscore is None:\n BIC = 0\n for i, model in enumerate(self.models):\n n = model.n \n k = model.m.num_params\n L = model.m.log_likelihood()\n BIC += L - k/2*np.log(n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def predict_log_likelihood_ratio(self, X):\n class_probs = np.maximum(np.squeeze(self.model.predict(X)[0]), self.class_min)\n return np.log(class_probs / (1 - class_probs))", "def compute_log_marginal_likelihood(\n K_i: torch.Tensor,\n logDetK: torch.Tensor,\n y: torch.Tensor,\n normalize: bool = True,\n log_prior_dist=None,\n):\n lml = (\n -0.5 * y.t() @ K_i @ y\n + 0.5 * logDetK\n - y.shape[0]\n / 2.0\n * torch.log(\n 2\n * torch.tensor(\n np.pi,\n )\n )\n )\n if log_prior_dist is not None:\n lml -= log_prior_dist\n return lml / y.shape[0] if normalize else lml", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n \n if self.BICscore is None:\n k = self.m.num_params\n L = self.m.log_likelihood()\n BIC = L - k/2*np.log(self.n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def get_log_likelihood(response_probability, response):\n pass", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def _build(self,\n model_type: str,\n **kwargs) -> Predictor:\n if model_type == 'classifier':\n modelcls = sklearn.gaussian_process.GaussianProcessClassifier\n elif model_type == 'regressor':\n modelcls = sklearn.gaussian_process.GaussianProcessRegressor\n else:\n raise ValueError(\n '`model_type` should be \"classifier\" or \"regressor\"')\n model = modelcls(**kwargs)\n return model", "def _choose_model(self, model_str):\n if model_str == 'lg':\n return(LogisticRegression())\n elif model_str == 'rf':\n return(RandomForestClassifier())\n elif model_str == 'svm':\n # return SVC(C=1, kernel='linear') # linear boundary\n return SVC(C=1, kernel='poly', degree=2) # non-linear boundary\n # return SVC(C=1, kernel='rbf')\n # return SVC(C=1, kernel='sigmoid') # binary classification", "def GPy_log_marginal_likelihood(X, Y, keep_model=True, plot=False, variance=1., lengthscale=3., input_dim=1, length=10., view_ratio=1.1):\r\n kernel= GPy.kern.RBF(input_dim=input_dim, variance=variance, lengthscale=lengthscale)\r\n gp = GPy.models.GPRegression(X, Y, kernel)\r\n # print(gp)\r\n # print(gp.rbf.lengthscale.values)\r\n if plot:\r\n gp.plot(plot_limits=np.array([0., view_ratio*length]))\r\n if keep_model:\r\n return gp.log_likelihood(), gp\r\n elif keep_model==False:\r\n return gp.log_likelihood()\r\n else:\r\n return print(\"keep_model must be True or False.\")", "def predict_log_likelihood_ratio(self, X):\n Xs = self.scaler.transform(X)\n class_probs = np.maximum(self.model.predict_proba(Xs)[:, 1], self.class_min)\n return np.log(class_probs / (1 - class_probs))", "def 
initialize_model(model_type, **kwargs):\n try:\n model_class = MODEL_DICT[model_type]\n except KeyError:\n raise RuntimeError(f\"Cannot find model class for {model_type}. Pick one of {list(MODEL_DICT.keys())}\")\n\n return model_class(**kwargs)", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()", "def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact", "def MH_step(log_like, log_prior, model_func, prop_params, curr_params,\\\n curr_like, curr_prior, max_like, maxL_params):\n # proposed model:\n prop_model = model_func(prop_params)\n prop_like = log_like(prop_model)\n prop_prior = log_prior(prop_params)\n\n # posterior:\n post_old = curr_like + curr_prior\n post_new = prop_like + prop_prior\n \n # acceptance testing:\n a = np.exp(post_new - post_old)\n draw = np.random.uniform(0, 1)\n \n if (a > draw) and (a < np.inf):\n accept = True\n curr_params = prop_params\n #print(curr_like, max_like)\n if prop_like > max_like:\n max_like = prop_like\n maxL_params = curr_params\n else:\n accept = False\n curr_params = curr_params\n \n return(accept, curr_params, maxL_params, max_like)", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def log_likelihood(self, data, reward_model, bias_params):", "def get_model_class(model_name, task_name):\n if task_name == 'rocstories':\n return OpenAIGPTDoubleHeadsModel if model_name == 'openai-gpt' else GPT2DoubleHeadsModel\n else:\n return OpenAIGPTLMHeadModel if model_name == 'openai-gpt' else GPT2LMHeadModel", "def __init__(self, model: MT):\n self.model: Final[MT] = model", "def multinomial_class(\n distribution_or_probs: Union[tfd.Distribution, 
jnp.DeviceArray]\n) -> jnp.DeviceArray:\n if isinstance(distribution_or_probs, tfd.Distribution):\n return jnp.argmax(distribution_or_probs.logits_parameter(), axis=1)\n return jnp.argmax(distribution_or_probs, axis=1)" ]
[ "0.6100431", "0.5624646", "0.55522686", "0.55522686", "0.536249", "0.53345406", "0.5269471", "0.52394426", "0.5220503", "0.51793855", "0.51639843", "0.508774", "0.50566566", "0.505593", "0.50103873", "0.50024384", "0.49973372", "0.4989311", "0.49551207", "0.49261236", "0.49254608", "0.49082416", "0.4896225", "0.48831117", "0.4874971", "0.48513383", "0.48297438", "0.4827886", "0.48256612", "0.48106003" ]
0.74493885
0
r"""Chooses a BoTorch `AcquisitionFunction` class.
def choose_botorch_acqf_class() -> Type[AcquisitionFunction]:
    # NOTE: In the future, this dispatch function could leverage any
    # of the attributes of `BoTorchModel` or kwargs passed to
    # `BoTorchModel.gen` to intelligently select acquisition function.
    return qNoisyExpectedImprovement
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, acquisition_functions):\n self.acquisition_functions = acquisition_functions", "def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n return acq_optimiser(acq_fn, anc_data.max_evals)", "def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n if anc_data.acq_opt_method == 'direct':\n acquisition = lambda x: acq_fn(x.reshape((1, -1)))\n else:\n acquisition = acq_fn\n _, opt_pt = acq_optimiser(acquisition, anc_data.max_evals)\n return opt_pt", "def _function_class(self):\n return FriCASExpectFunction", "def run_acquisition_function(\n acquisition_function,\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n data_array,\n model_type,\n classification_model=None,\n number_of_cpus=0,\n):\n tmp_objective_limits = None\n configurations = concatenate_list_of_dictionaries(configurations)\n configurations = data_dictionary_to_tuple(\n configurations, param_space.get_input_parameters()\n )\n if acquisition_function == \"TS\":\n scalarized_values, tmp_objective_limits = thompson_sampling(\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n model_type,\n classification_model,\n number_of_cpus,\n )\n elif acquisition_function == \"UCB\":\n scalarized_values, tmp_objective_limits = ucb(\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n model_type,\n classification_model,\n number_of_cpus,\n )\n elif acquisition_function == \"EI\":\n scalarized_values, tmp_objective_limits = EI(\n configurations,\n data_array,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n model_type,\n classification_model,\n number_of_cpus,\n )\n else:\n print(\"Unrecognized acquisition function:\", acquisition_function)\n raise SystemExit\n\n scalarized_values = list(scalarized_values)\n\n # we want the local search to consider all points feasible, we already account for feasibility it in the scalarized value\n feasibility_indicators = [1] * len(scalarized_values)\n\n return scalarized_values, feasibility_indicators", "def choose_class(self, *args, **kwargs):", "def __init__(\n self,\n async_strategy=\"impute\",\n impute_strategy=\"cl_min\",\n acq_fun=None,\n acq_fun_kwargs=None,\n acq_optimizer=\"lbfgs\",\n acq_optimizer_kwargs=None,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n # validations\n\n # allowed combinations of async strategies and acquisition functions\n allowed_combinations = {\n \"impute\": {\n \"EI\": GaussianProcess_EI,\n \"LCB\": GaussianProcess_LCB,\n \"PI\": GaussianProcess_PI,\n },\n \"asy_ts\": {\"AsyTS\": AsyTS},\n }\n if async_strategy not in allowed_combinations.keys():\n raise ValueError(\n \"Expected async_strategy to be in {} with GP as surrogate, got {}\".format(\n list(allowed_combinations.keys()), async_strategy\n )\n )\n\n if async_strategy == \"impute\" and self.pruner:\n if not self.interim_results:\n raise ValueError(\n \"Optimizer GP with async strategy `impute` only supports Pruner with interim_results==True, got {}\".format(\n self.interim_results\n )\n )\n\n if acq_fun not in allowed_combinations[async_strategy] and acq_fun is not None:\n raise ValueError(\n \"Expected acq_fun to be in {} with GP as surrogate and {} as async_strategy, got {}\".format(\n list(allowed_combinations[async_strategy].keys()),\n async_strategy,\n acq_fun,\n )\n 
)\n\n # async_strategy\n self.async_strategy = async_strategy\n\n # configure acquisition function\n if acq_fun is None:\n # default acq_fun is the first in the dict\n acq_fun = list(allowed_combinations[async_strategy].keys())[0]\n self.acq_fun = allowed_combinations[self.async_strategy][acq_fun]()\n self.acq_func_kwargs = acq_fun_kwargs\n\n # configure acquisiton function optimizer\n allowed_acq_opt = [\"sampling\", \"lbfgs\"]\n if acq_optimizer not in allowed_acq_opt:\n raise ValueError(\n \"expected acq_optimizer to be in {}, got {}\".format(\n allowed_acq_opt, acq_optimizer\n )\n )\n self.acq_optimizer = acq_optimizer\n if acq_optimizer_kwargs is None:\n acq_optimizer_kwargs = dict()\n\n if self.async_strategy == \"asy_ts\":\n # default value is 100 and max value is 1000 for asy ts\n self.n_points = np.clip(acq_optimizer_kwargs.get(\"n_points\", 100), 10, 1000)\n else:\n self.n_points = acq_optimizer_kwargs.get(\"n_points\", 10000)\n self.n_restarts_optimizer = acq_optimizer_kwargs.get(\"n_restarts_optimizer\", 5)\n self.acq_optimizer_kwargs = acq_optimizer_kwargs\n\n # configure impute strategy\n if self.async_strategy == \"impute\":\n allowed_impute_strategies = [\"cl_min\", \"cl_max\", \"cl_mean\", \"kb\"]\n if impute_strategy not in allowed_impute_strategies:\n raise ValueError(\n \"expected impute_strategy to be in {}, got {}\".format(\n allowed_impute_strategies, impute_strategy\n )\n )\n self.impute_strategy = impute_strategy\n\n # estimator that has not been fit on any data.\n self.base_model = None\n\n if self.async_strategy == \"impute\":\n self._log(\"Impute Strategy: {}\".format(self.impute_strategy))", "def _function_element_class(self):\n return FriCASFunctionElement", "def next_point(self):\n if self.verbose:\n print(\"Computing acquisition function...\")\n if self.acquisition_function == 'cb':\n acq, pred = acqfunc.confidence_bound(\n self.surrogate_model, self.X_full,\n alpha=self.alpha, beta=self.beta)\n elif self.acquisition_function == 'ei':\n acq, pred = acqfunc.expected_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif self.acquisition_function == 'poi':\n acq, pred = acqfunc.probability_of_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif isinstance(self.acquisition_function, types.FunctionType):\n acq, pred = self.acquisition_function(\n self.surrogate_model, self.X_full, self.X_sparse)\n else:\n raise NotImplementedError(\n \"Choose between 'cb', 'ei', and 'poi' acquisition functions or define your own\")\n self.gp_predictions.append(pred)\n if self.mask is None:\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list][::-1][:self.batch_size].tolist()\n indices_list = np.dstack(indices_list)[0][::-1][:self.batch_size].tolist()\n else:\n acq = self.mask*acq\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list]\n vals_list = vals_list[~np.isnan(vals_list)][::-1]\n indices_list = np.dstack(indices_list)[0]\n indices_list = indices_list[:len(vals_list)][::-1]\n vals_list = vals_list[:self.batch_size].tolist()\n indices_list = indices_list[:self.batch_size].tolist()\n if not self.batch_update:\n return vals_list, indices_list\n if self.batch_dscale is None:\n batch_dscale_ = self.surrogate_model.model.kernel.lengthscale.mean().item()\n else:\n batch_dscale_ = self.batch_dscale\n vals_list, indices_list = self.update_points(\n vals_list, indices_list, batch_dscale_)\n return vals_list, indices_list", 
"def pick_action(self):\n if self.exploration_mode == 'time':\n self.acq_func.exploration_rate = self.exploration_rate(self.duration + 1)\n elif self.exploration_mode == 'samples':\n self.acq_func.exploration_rate = self.exploration_rate(len(self.rounds) + 1)\n\n fid, x = optim.pick_acquisition_mf(acq_func=self.acq_func,\n optimizer=self.aux_optimizer,\n gammas=self.gammas,\n x_init=self.aux_x_init)\n rmean, rsd = self.acq_func.predict_mf(fid=fid, x=x)\n\n # Undo negation of objective function so as to not confuse user\n if self.mode == 'min':\n rmean = -rmean\n\n rospy.loginfo('Next sample (%d, %s) with beta %f and predicted reward %f +- %f',\n fid,\n str(x), self.acq_func.exploration_rate,\n rmean,\n rsd)\n return fid, x", "def choice(func):\n # __choice_fn func_name used to identify function in Alternation.execute\n def __choice_fn(*args, **kwargs):\n return Choice(func, *args, **kwargs)\n return __choice_fn", "def __init__(self, function):\n self.function = function", "def __init__(self, function=None):\n self._function = function", "def pick_next(self, STATUS, N=100, nysamples=100):\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n if self.acquisition_function == 'Thompson':\n alpha = self.samples()\n \n elif self.acquisition_function == 'Greedy_N':\n y_samples = self.samples(nysamples)\n alpha = np.zeros(self.n)\n for j in range(nysamples):\n # count number of times each point is in the top N for a sample \n alpha[np.argpartition(y_samples[:, j], -N)[-N:]] += 1\n \n elif self.acquisition_function == 'Greedy_tau':\n if np.mod(self.estimate_tau_counter, self.tau_update) == 0:\n self.estimate_tau()\n self.estimate_tau_counter += 1\n else:\n self.estimate_tau_counter += 1\n mu_X_pos, var_X_pos = self.predict()\n alpha = 1-norm.cdf(np.divide(self.tau-mu_X_pos,var_X_pos**0.5))\n \n elif self.acquisition_function == 'EI':\n mu_X_pos, var_X_pos = self.predict()\n sig_X_pos = var_X_pos**0.5\n alpha = (mu_X_pos-self.y_max)*norm.cdf(np.divide(mu_X_pos-self.y_max,sig_X_pos))+sig_X_pos*norm.pdf(np.divide(mu_X_pos-self.y_max,sig_X_pos))\n \n else:\n # if no valid acquisition_function entered then pick at random \n alpha = np.random.rand(self.n)\n print('enter a valid acquisition function - picking randomly')\n ipick = untested[np.argmax(alpha[untested])]\n return ipick", "def start_acquisition(self):\n self.lib.StartAcquisition()", "def getFunctionClass(functionID):\n d = { 1: Linear,\n 2: LinearDrag,\n 11: Gaussian,\n 12: GaussianDrag,\n 21: Lorentzian,\n 22: LorentzianDrag }\n return d[functionID]", "def __get_function(self):\n return random.choice(self.FUNCTIONS)", "def __init__(self, function='cogscore/'):\n self.function = function", "def _set_up_acq_opt_rand(self):\n def _random_max_wrap(*args):\n \"\"\" A wrapper so as to only return optimal point.\"\"\"\n _, opt_pt = random_maximise(*args)\n return opt_pt\n # Set this up in acq_optimise\n self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj, self.domain_bounds,\n max_evals)\n if self.get_acq_opt_max_evals is None:\n lead_const = 10 * min(5, self.domain_dim)**2\n self.get_acq_opt_max_evals = lambda t: np.clip(\n lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)\n # Acquisition function should be evaluated via multiple evaluations\n self.acq_query_type = 'multiple'", "def AcquisitionSource(self, default={}):\n tmp = self.data.get('metadata', {}).get('acquisition_source', default)\n return HEP.AcquisitionSourceObject(tmp)", "def __init__(self,\n function: Callable):\n\n self._function = function", "def getFunction(self) -> 
ghidra.program.model.listing.Function:\n ...", "def acquisition_function_random(gp_reward_model: BasicGPRewardModel) -> int:\n return np.random.randint(0, len(gp_reward_model.candidate_queries))", "def get_acquisition_func(i: int):\n switcher = {\n 0: \"category\",\n 1: \"mean\",\n 2: \"std\",\n 3: \"random\",\n }\n return switcher.get(i, \"category\")", "def get_q_func(self, is_training=False, reuse=False, scope='q_func'):\n return functools.partial(self.q_func,\n scope=scope,\n reuse=reuse,\n is_training=is_training)", "def __init__(self, function, **kwargs):\n self.function = function\n self.kwargs = kwargs", "def __init__(self, fitness_function, *args, **kwargs):\n Function.__init__(self, fitness_function)\n self.fitness_function = fitness_function # never used\n self.args = args\n self.kwargs = kwargs", "def func(self):\n return self.__class__", "def auto() -> AutoDistribute:\n return _auto", "def finite_acquisition(self, *args, **kwargs):\n return _uhd_swig.usrp_source_finite_acquisition(self, *args, **kwargs)" ]
[ "0.6058467", "0.5935972", "0.58589655", "0.5788014", "0.548214", "0.5331167", "0.53201264", "0.53140235", "0.53060716", "0.5304556", "0.5121495", "0.5052555", "0.5037035", "0.50274396", "0.500004", "0.49999496", "0.49726632", "0.49604744", "0.49250162", "0.48792323", "0.48333678", "0.47544217", "0.47469586", "0.47466987", "0.47157833", "0.4712404", "0.47073543", "0.4704667", "0.47041744", "0.46938" ]
0.73012865
0
Construct a `TrainingData` object based on sizes of Xs, Ys, and Yvars, and the type of model, for which the training data is intended.
def construct_training_data(
    Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], model_class: Type[Model]
) -> TrainingData:
    if not isclass(model_class):  # pragma: no cover
        raise ValueError(
            f"Expected `Type[Model]`, got: {model_class} "
            f"(type: {type(model_class)})."
        )
    if len(Xs) == len(Ys) == 1:
        # Just one outcome, can use single model.
        return TrainingData(X=Xs[0], Y=Ys[0], Yvar=Yvars[0])
    elif issubclass(model_class, BatchedMultiOutputGPyTorchModel) and all(
        torch.equal(Xs[0], X) for X in Xs[1:]
    ):
        # All Xs are the same and model supports batched multioutput.
        return TrainingData(
            X=Xs[0], Y=torch.cat(Ys, dim=-1), Yvar=torch.cat(Yvars, dim=-1)
        )
    elif model_class is ModelListGP:  # pragma: no cover
        # TODO: This will be case for `ListSurrogate`.
        raise NotImplementedError("`ModelListGP` not yet supported.")
    raise ValueError(f"Unexpected training data format for {model_class}.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def _unpack_training_data(data, val=None):\n if isinstance(data, TrainingData):\n assert val is None\n return data\n\n if val is not None:\n x, y = data\n return TrainingData.from_x_y(x, y, val)\n\n train, val = data\n if not isinstance(train, Dataset):\n xx, yy = train\n train = RamDataset(xx, yy)\n if not isinstance(val, Dataset):\n xx, yy = val\n val = RamDataset(xx, yy)\n return TrainingData(train, val)", "def prepare_dataset(data_path, test_size=0.2, validation_size=0.2):\r\n\r\n # load dataset\r\n if data_path.endswith('json'):\r\n X, y = load_data_from_json(data_path)\r\n else:\r\n X, y = load_data_from_fold(data_path)\r\n # create train, validation, test split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\r\n X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)\r\n\r\n # add an axis to nd array\r\n X_train = X_train[..., np.newaxis]\r\n X_test = X_test[..., np.newaxis]\r\n X_validation = X_validation[..., np.newaxis]\r\n\r\n return X_train, y_train, X_validation, y_validation, X_test, y_test", "def build_training_data_loader(self) -> DataLoader:\n pass", "def createtrainingarrays(dataSize, xVariables, yVariable, TrainIndices):\n\n # For the desired training indices, add the values to the training arrays\n xTrainValues = np.array([])\n yTrainValues = np.array([])\n indexCounter = 0\n for q in range(0, dataSize):\n if TrainIndices.__contains__(q):\n\n if indexCounter is 0:\n xTrainValues = xVariables[q]\n indexCounter = -1\n else:\n xTrainValues = np.vstack((xTrainValues, xVariables[q]))\n\n yTrainValues = np.append(yTrainValues, yVariable[0][q])\n\n # Reshape the data to proper dimensions so that a linear regression may be performed\n length = yTrainValues.size\n yTrainValues = yTrainValues.reshape(length, 1)\n\n return xTrainValues, yTrainValues", "def get_data_loader_from_data(cls, batch_size, X, Y, **kwargs):\n X_torch = torch.from_numpy(X).float()\n\n if (\n \"classification_problem\" in kwargs\n and kwargs[\"classification_problem\"] == False\n ):\n Y_torch = torch.from_numpy(Y).float()\n else:\n Y_torch = torch.from_numpy(Y).long()\n dataset = TensorDataset(X_torch, Y_torch)\n kwargs.pop(\"classification_problem\", None)\n return DataLoader(dataset, batch_size=batch_size, **kwargs)", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == 
\"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def build_dataset(self, X, y=None):\n X = np.array(X)\n self.input_dim = X.shape[1]\n X = torch.FloatTensor(X)\n if y is None:\n dataset = torch.utils.data.TensorDataset(X)\n else:\n self.classes_ = sorted(set(y))\n self.n_classes_ = len(self.classes_)\n class2index = dict(zip(self.classes_, range(self.n_classes_)))\n y = [class2index[label] for label in y]\n y = torch.tensor(y)\n dataset = torch.utils.data.TensorDataset(X, y)\n return dataset", "def trainData(self, X, y, NeuralNet, epochs):", "def prepare_dataset(self, xFold_step, xFold_type):\n\n eval_samples_per_xfold = int(round((self.__train_size + self.__eval_size)/xFold_type))\n\n start_index = int(xFold_step*eval_samples_per_xfold)\n end_index = int(start_index + eval_samples_per_xfold)\n\n if end_index < len(self.__read_in_labels[-self.__test_size:]):\n end_index = len(self.__read_in_labels[-self.__test_size:])\n\n dataset = {\n \"x_train\": np.concatenate((self.__read_in_images[:start_index], self.__read_in_images[end_index:]), axis=0),\n \"y_train\": np.concatenate((self.__read_in_labels[:start_index], self.__read_in_labels[end_index:]), axis=0),\n\n \"x_eval\": self.__read_in_images[start_index:end_index],\n \"y_eval\": self.__read_in_labels[start_index:end_index],\n\n \"x_test\": self.__read_in_images[-self.__test_size:],\n \"y_test\": self.__read_in_labels[-self.__test_size:],\n }\n\n return dataset", "def make_data_loader(examples, batch_size=100, shuffle=True):\n x, y = zip(*examples) # makes lists of windows and tags\n x, y = tr.from_numpy(np.array(x)), tr.from_numpy(np.array(y))\n x, y = x.type(tr.LongTensor), y.type(tr.LongTensor) # convert lists to tensors\n train = utdata.TensorDataset(x, y)\n return utdata.DataLoader(train, batch_size, shuffle)", "def make_training_xy(self, data):\n pass", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if 
dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def _create_model_get_train_X_y(self, X_train, y_train):\n if X_train is not None:\n data_X = X_train.copy()\n else:\n if self.X_train is None:\n data_X = None\n else:\n data_X = self.X_train\n data_y = self.y_train if y_train is None else y_train.copy()\n return data_X, data_y", "def _create_dataset(batch_size):\n ds = collections.OrderedDict([('x', [[-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]]),\n ('y', [[1.0], [1.0], [1.0]])])\n # Note: batching is needed here as it creates the required batch dimension.\n # The batch size can be re-set (by `unbatch()` first) in personalization.\n return tf.data.Dataset.from_tensor_slices(ds).batch(batch_size)", "def create_data(dataset, val_size=0.1):\n if dataset == 1:\n (X, y), test = mnist.load_data()\n else:\n (X, y), test = fashion_mnist.load_data()\n\n len_val = int(X.shape[0] * val_size)\n \n return (X[len_val:], y[len_val:]), (X[:len_val], y[:len_val]), test", "def construct(data_dir, fname, X=None, normalize=False, _type='sparse'):\n if _type == 'sparse':\n return SparseFeatures(data_dir, fname, X, normalize)\n elif _type == 'dense':\n return DenseFeatures(data_dir, fname, X, normalize)\n elif _type == 'sequential':\n return SequentialFeatures(data_dir, fname, X)\n else:\n raise NotImplementedError(\"Unknown 
feature type\")", "def training_data(kind, depth = 5):\n\n if kind == 'unigram':\n return UnigramTrainingData.load(UNIGRAM_DIR + str(depth))\n\n if kind == 'rnn':\n return RNNTrainingData.load(RNN_DIR + str(depth))", "def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 
'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def _create_dataset(self, *data):\n # 
Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] * context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def create_and_train_NN(nn_model, training_data, training_keywords):\n\n data_rows, data_columns = training_data.shape\n\n keywords_rows, keywords_columns = training_keywords.shape\n\n nn_model.create_model(data_columns, keywords_columns)\n\n nn_model.train_model(training_data, training_keywords, graphs=True)", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def prepare_dataset(self, dataset_type: str) -> Dataset:\n\n logger.info(\"Creating features from dataset file at %s\", self.hparams.data_dir)\n\n if dataset_type == \"train\":\n dataset = self.processor.get_train_dataset(self.hparams.data_dir, self.hparams.train_file_name)\n elif dataset_type == \"dev\":\n dataset = self.processor.get_dev_dataset(self.hparams.data_dir, self.hparams.dev_file_name)\n elif dataset_type == \"test\":\n dataset = self.processor.get_test_dataset(self.hparams.data_dir, self.hparams.test_file_name)\n else:\n raise ValueError(f\"{dataset_type} do not support. 
[train|dev|test]\")\n logger.info(f\"Prepare {dataset_type} dataset (Count: {len(dataset)}) \")\n return dataset", "def initialize_dataloaders(\n self, X: Union[np.ndarray, pd.DataFrame], y: Union[np.ndarray, np.array]\n ):\n training_design_matrix, training_targets_array, validation_design_matrix, validation_targets_array = self.generate_training_validation_split(\n X, y\n )\n training_dataloader_kwargs = {\n \"design_matrix\": training_design_matrix,\n \"targets_array\": training_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": self.shuffle_training_examples,\n }\n validation_dataloader_kwargs = {\n \"design_matrix\": validation_design_matrix,\n \"targets_array\": validation_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": False,\n }\n self.training_dataloader = self.generate_dataloader(**training_dataloader_kwargs)\n self.validation_dataloader = self.generate_dataloader(**validation_dataloader_kwargs)", "def getTrainingData(self):\n raise NotImplementedError", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def __init__(self, data_X, data_Y, dtype=dtypes.float32):\n dtype = dtypes.as_dtype(dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError(\"Invalid dtype %r, expected uint8 or float32\" % dtype)\n\n assert data_X.shape[0] == data_Y.shape[0], (\"data_X.shape: %s data_Y.shape: %s\" % (data_X.shape, data_Y.shape))\n self.num_examples = data_X.shape[0]\n\n if dtype == dtypes.float32:\n data_X = data_X.astype(np.float32)\n self.data_X = data_X\n self.data_Y = data_Y \n\n self.epochs_completed = 0\n self.index_in_epoch = 0" ]
[ "0.62350506", "0.6225271", "0.6160196", "0.6157232", "0.6134622", "0.61098945", "0.6080254", "0.606203", "0.60147977", "0.60086304", "0.59555346", "0.5948243", "0.5919351", "0.59027153", "0.5900304", "0.58960056", "0.5879972", "0.58337307", "0.5829074", "0.5811363", "0.58097553", "0.5799441", "0.57921666", "0.57788956", "0.5762118", "0.5751669", "0.5749159", "0.5743394", "0.57385665", "0.57341266" ]
0.8095568
0
Validates that Xs, Ys, Yvars, and metric names all have equal lengths.
def validate_data_format( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], metric_names: List[str] ) -> None: if len({len(Xs), len(Ys), len(Yvars), len(metric_names)}) > 1: raise ValueError( # pragma: no cover "Lengths of Xs, Ys, Yvars, and metric_names must match. Your " f"inputs have lengths {len(Xs)}, {len(Ys)}, {len(Yvars)}, and " f"{len(metric_names)}, respectively." )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_consistent_length(y_true: List[List[str]], y_pred: List[List[str]]):\n len_true = list(map(len, y_true))\n len_pred = list(map(len, y_pred))\n is_list = set(map(type, y_true)) | set(map(type, y_pred))\n\n if len(y_true) != len(y_pred) or len_true != len_pred:\n message = 'Found input variables with inconsistent numbers of samples:\\n{}\\n{}'.format(len_true, len_pred)\n raise ValueError(message)", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def validate_X_y(X: List[str], y: List[Any]):\n if len(X) != len(y):\n raise ValueError(\n f\"X and y must have the same length; X has length {len(X)}, and y has length {len(y)}\"\n )", "def _validate_length_features_and_labels(\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ):\n\n # Getting the length of label names, feature_names and feature_stats\n len_of_label_names = (\n 0\n if not model_endpoint.spec.label_names\n else len(model_endpoint.spec.label_names)\n )\n len_of_feature_names = len(model_endpoint.spec.feature_names)\n len_of_feature_stats = len(model_endpoint.status.feature_stats)\n\n if len_of_feature_stats != len_of_feature_names + len_of_label_names:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"The length of model endpoint feature_stats is not equal to the \"\n f\"length of model endpoint feature names and labels \"\n f\"feature_stats({len_of_feature_stats}), \"\n f\"feature_names({len_of_feature_names}),\"\n f\"label_names({len_of_label_names}\"\n )", "def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). 
\"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,\n y_test_pred):\n\n # check input data shapes are consistent\n X_train, y_train = check_X_y(X_train, y_train)\n X_test, y_test = check_X_y(X_test, y_test)\n\n y_test_pred = column_or_1d(y_test_pred)\n y_train_pred = column_or_1d(y_train_pred)\n\n check_consistent_length(y_train, y_train_pred)\n check_consistent_length(y_test, y_test_pred)\n\n if X_train.shape[1] != X_test.shape[1]:\n raise ValueError(\"X_train {0} and X_test {1} have different number \"\n \"of features.\".format(X_train.shape, X_test.shape))\n\n return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be 
({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def check_x_and_y_axis_len(self, x_axis, y_axis):\n if x_axis ==0: \n raise ValueError(\"Error! SOM X-Axis is 0!\")\n if y_axis==0:\n raise ValueError(\"Error! SOM Y-Axis is 0!\")", "def validate_common(ndarray, name):\n\tvalidate_ndarray(ndarray,(np.float, np.int), (2,) , name)", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def _validate_columns(self, names):\n if not is_list_like(names):\n raise ValueError(\"Columns should be list-like\")\n\n if len(set(names)) != len(names):\n raise ValueError(\"Duplicate column names\")\n\n if self._data and len(names) != len(self._data[0]):\n raise ValueError(\"Invalid columns length\")", "def validate_xy(x_train, y_train):\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 
1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format(\n len(inputs), len(targets)))", "def test_spaces(self):\n self.assertTrue(validate_measure_input('1 ', self.measures))\n self.assertFalse(validate_measure_input('1 1', self.measures))", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def _check_values_len(self, data_batch: Dict[str, List[str]]):\n values_len = [len(v) for _, v in data_batch.items()]\n unique_len = len(set(values_len))\n assert unique_len == 1, \"Length of values are not consistent across\"", "def validate(self):\n variables = ['waterThickness', 'waterPressure']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/output.nc',\n filename2='restart_run/output.nc')", "def _check_data(self, labels, fluxes, flux_uncertainties, wavelengths=None):\n\n fluxes = np.atleast_2d(fluxes)\n flux_uncertainties = np.atleast_2d(flux_uncertainties)\n\n if len(labels) != fluxes.shape[0]:\n raise ValueError(\"the fluxes should have shape (n_stars, n_pixels) \"\n \"where n_stars is the number of rows in the labels array\")\n\n if fluxes.shape != flux_uncertainties.shape:\n raise ValueError(\"the flux and flux uncertainties array should have\"\n \" the same shape\")\n\n if len(labels) == 0:\n raise ValueError(\"no stars (labels) given\")\n\n if wavelengths is not None:\n wavelengths = np.atleast_1d(wavelengths)\n if wavelengths.size != fluxes.shape[1]:\n raise ValueError(\"mis-match between number of wavelength values\"\n \" ({0}) and flux values ({1})\".format(\n wavelengths.size, fluxes.shape[1]))\n\n return None", "def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n 
is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions" ]
[ "0.6732428", "0.65989214", "0.65425104", "0.64392585", "0.6399168", "0.63487905", "0.6343038", "0.62423515", "0.61932313", "0.6176423", "0.6091993", "0.6081119", "0.60686547", "0.6042439", "0.603004", "0.60111535", "0.601055", "0.60092235", "0.59968686", "0.5972382", "0.59626067", "0.59533864", "0.59476745", "0.5907038", "0.59007293", "0.5898824", "0.58938205", "0.5889132", "0.5886622", "0.58786273" ]
0.8259363
0
Extract acquisition and optimizer options from `model_gen_options`.
def construct_acquisition_and_optimizer_options( acqf_options: TConfig, model_gen_options: Optional[TConfig] = None ) -> Tuple[TConfig, TConfig]: acq_options = acqf_options.copy() opt_options = {} if model_gen_options: acq_options.update( checked_cast(dict, model_gen_options.get(Keys.ACQF_KWARGS, {})) ) # TODO: Add this if all acq. functions accept the `subset_model` # kwarg or opt for kwarg filtering. # acq_options[SUBSET_MODEL] = model_gen_options.get(SUBSET_MODEL) opt_options = checked_cast( dict, model_gen_options.get(Keys.OPTIMIZER_KWARGS, {}) ).copy() return acq_options, opt_options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_build_options(cls, opt: Opt):\n query_model = 'bert'\n document_model = 'bert'\n query_path = opt['model_file']\n document_path = opt['model_file']\n try:\n # determine if loading a RAG model\n loaded_opt = Opt.load(f\"{query_path}.opt\")\n document_path = loaded_opt.get('dpr_model_file', document_path)\n if loaded_opt['model'] in ['rag', 'fid'] and loaded_opt['query_model'] in [\n 'bert',\n 'bert_from_parlai_rag',\n ]:\n query_model = 'bert_from_parlai_rag'\n if loaded_opt['model'] == 'fid':\n # document model is always frozen\n # but may be loading a FiD-RAG Model\n doc_loaded_opt = Opt.load(\n f\"{modelzoo_path(opt['datapath'], document_path)}.opt\"\n )\n document_path = doc_loaded_opt.get('dpr_model_file', document_path)\n\n except FileNotFoundError:\n pass\n\n return query_model, query_path, document_model, document_path", "def optimizer_config(self):\r\n return {\r\n \"lr\": self.args.lr[0],\r\n \"momentum\": self.args.momentum,\r\n \"weight_decay\": self.args.weight_decay,\r\n }", "def get_model_kwargs(parsed_args):\n parsed_args.model_name = parsed_args.model_name.lower()\n if parsed_args.model_name not in SUPPORTED_MODELS:\n raise ValueError(\"Model name must be in the set: {}\".format(SUPPORTED_MODELS))\n res = {'learning_rate': parsed_args.learning_rate}\n restore_ckpt_dir = parsed_args.restore_efficient_net_weights_from\n res[\"restore_ckpt_dir\"] = restore_ckpt_dir\n if parsed_args.lsd:\n res[\"rsd\"] = parsed_args.lsd\n res[\"feature_extractor_name\"] = parsed_args.feature_extractor_name\n res[\"l2\"] = parsed_args.l2\n res[\"final_layer_dropout_rate\"] = parsed_args.final_layer_dropout_rate\n res[\"label_smoothing\"] = parsed_args.label_smoothing\n if \"dice\" not in parsed_args.loss_name:\n res[\"dice\"] = False\n if parsed_args.sgd:\n res['optimizer'] = tf.train.GradientDescentOptimizer\n else:\n res['optimizer'] = partial(tf.train.AdamOptimizer, beta1=0)\n res['loss_name'] = parsed_args.loss_name\n res[\"n_rows\"] = parsed_args.image_size\n res[\"n_cols\"] = parsed_args.image_size\n return res", "def parse_options(parser):\n TensorflowModel.parse_options(parser)\n parser.add_argument('--input-dim', type=int, default=160)\n parser.add_argument('--input-len', type=int, default=7501)\n parser.add_argument('--output-len', type=int, default=7501)\n parser.add_argument('--conv-layer-num', type=int, default=2)\n parser.add_argument('--conv-kernel-num', type=int, default=1)\n parser.add_argument('--conv-kernel-len', type=int, default=512)", "def iterate_optimizer_configs(options):\n for batch_size in options[consts.BATCH_SIZE]:\n for optimizer in options[consts.OPTIMIZER]:\n config = options.copy()\n config[consts.BATCH_SIZE] = batch_size\n config[consts.OPTIMIZER] = optimizer\n yield config", "def params(config):\n from transformer_tools.model import params as mparams\n mparams(config)\n\n group = OptionGroup(config,\"transformer_tools.Tagger\",\n \"Settings for tagger models\")\n\n group.add_option(\"--model_type\",\n dest=\"model_type\",\n default='bert-base-uncased',\n type=str,\n help=\"The type of tagger to use [default='bert-base-cased']\")\n\n group.add_option(\"--existing_model\",\n dest=\"existing_model\",\n default='',\n type=str,\n help=\"The path of an existing model to load [default='']\")\n\n group.add_option(\"--model_name\",\n dest=\"model_name\",\n default='bert',\n type=str,\n help=\"The name of the model [default='bert']\")\n\n group.add_option(\"--tagger_model\",\n dest=\"tagger_model\",\n default='arrow_tagger',\n type=str,\n help=\"The name of 
the model [default='arrow_tagger']\")\n\n group.add_option(\"--label_list\",\n dest=\"label_list\",\n default=\"B-up;B-down;B-=\",\n type=str,\n help=\"The types of labels to use [default='B-up;B-down;B-=']\")\n\n group.add_option(\"--save_model_every_epoch\",\n dest=\"save_model_every_epoch\",\n action='store_true',\n default=False,\n help=\"Backup up every model after epoch [default=False]\")\n\n group.add_option(\"--save_optimizer_and_scheduler\",\n dest=\"save_optimizer_and_scheduler\",\n action='store_true',\n default=False,\n help=\"Save the optimizer and schuler [default=False]\")\n\n group.add_option(\"--save_steps\",\n dest=\"save_steps\",\n default=-1,\n type=int,\n help=\"Save model at this frequency [default=-1]\")\n\n\n config.add_option_group(group)", "def parse(self):\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n opt.f_map = [opt.crop_size, opt.crop_size * 2, opt.crop_size * 4, opt.crop_size * 8]\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt", "def get_optimizers(args):\r\n\t# Create a generator which can map a latent vector size 8 to 72\r\n\tG = Generator(\r\n\t\tinput_size=args.g_input_size,\r\n\t\thidden_size=args.g_hidden_size,\r\n\t\toutput_size=args.g_output_size,\r\n\t\tp=args.p\r\n\t)\r\n\t# Create a discriminator which can turn 72-dimensional particle to Binary\r\n\t# prediction\r\n\tD = Discriminator(\r\n\t\tinput_size=args.d_input_size,\r\n\t\thidden_size=args.d_hidden_size,\r\n\t\toutput_size=args.d_output_size,\r\n\t\tp=args.p,\r\n\t\tdropout=args.dropout\r\n\t)\r\n\r\n\t# Choose an optimizer\r\n\tif args.optim == 'Adam':\r\n\t\td_optimizer = optim.Adam(D.parameters(), lr=args.d_learning_rate)\r\n\t\tg_optimizer = optim.Adam(G.parameters(), lr=args.g_learning_rate)\r\n\telse:\r\n\t\td_optimizer = optim.SGD(D.parameters(), lr=args.d_learning_rate)\r\n\t\tg_optimizer = optim.SGD(G.parameters(), lr=args.g_learning_rate, momentum=args.sgd_momentum)\r\n\treturn G, D, d_optimizer, g_optimizer", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def _get_optimizer(self):\n raise NotImplementedError", "def get_simulation_options(self):\n return self.opts", "def get_optimization_parameters(self):\n pass", "def next_tune_cfg(self):\n # generate tuning space according to user chosen tuning strategy\n\n while True:\n op_cfgs = {}\n op_cfgs['calib_iteration'] = int(np.random.choice(self.calib_iter))\n op_cfgs['op'] = {}\n for op, configs in self.opwise_quant_cfgs.items():\n cfgs_len = len(configs)\n if cfgs_len > 0:\n op_cfgs['op'][op] = configs[np.random.choice(cfgs_len)]\n else:\n op_cfgs['op'][op] = self.opwise_tune_cfgs[op][np.random.choice(\n len(self.opwise_tune_cfgs[op]))]\n\n yield op_cfgs", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(\n dqpsk_demod.__init__, ('self',), options)", "def get_model_config(model_name, args):\n if model_name == 'Tacotron2':\n model_config = dict(\n # optimization\n mask_padding=args.mask_padding,\n # audio\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=args.n_symbols,\n symbols_embedding_dim=args.symbols_embedding_dim,\n # 
encoder\n encoder_kernel_size=args.encoder_kernel_size,\n encoder_n_convolutions=args.encoder_n_convolutions,\n encoder_embedding_dim=args.encoder_embedding_dim,\n # attention\n attention_rnn_dim=args.attention_rnn_dim,\n attention_dim=args.attention_dim,\n # attention location\n attention_location_n_filters=args.attention_location_n_filters,\n attention_location_kernel_size=args.attention_location_kernel_size,\n # decoder\n n_frames_per_step=args.n_frames_per_step,\n decoder_rnn_dim=args.decoder_rnn_dim,\n prenet_dim=args.prenet_dim,\n max_decoder_steps=args.max_decoder_steps,\n gate_threshold=args.gate_threshold,\n p_attention_dropout=args.p_attention_dropout,\n p_decoder_dropout=args.p_decoder_dropout,\n # postnet\n postnet_embedding_dim=args.postnet_embedding_dim,\n postnet_kernel_size=args.postnet_kernel_size,\n postnet_n_convolutions=args.postnet_n_convolutions,\n decoder_no_early_stopping=args.decoder_no_early_stopping\n )\n return model_config\n elif model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n else:\n raise NotImplementedError(model_name)", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def get_optimizer(model, lr, transfer_optim):\n\n # different otpimizer lr for transfer to reuse low level features\n if transfer_optim:\n if isinstance(model, UNetRegressionModel):\n optimizer = torch.optim.Adam([{'params':\n list(model.msd.inc.parameters()) +\n list(model.msd.down1.parameters()) +\n list(model.msd.down2.parameters()), 'lr': 1e-6},\n {'params': \n list(model.msd.down3.parameters()) +\n list(model.msd.down4.parameters()) +\n list(model.msd.up1.parameters()), 'lr': 1e-5},\n {'params': \n list(model.msd.up2.parameters()) + list(model.msd.up3.parameters()) +\n list(model.msd.up4.parameters()) + list(model.msd.outc.parameters()), 'lr': 1e-4},\n ])\n\n else:\n params = list(model.msd.parameters())\n # case: MSD_d30\n if len(params) < 40:\n optimizer = torch.optim.Adam([{'params': params[1:10], 'lr':1e-6},\n {'params': params[:0]+ params[10:20], 'lr':1e-5},\n {'params': params[20:], 'lr':1e-4},\n ])\n # case: MSD_d80\n else:\n optimizer = torch.optim.Adam([{'params': params[1:20], 'lr':1e-6},\n {'params': params[:0]+ params[20:40], 'lr':1e-5},\n {'params': params[40:], 'lr':1e-4},\n ])\n else:\n optimizer = torch.optim.Adam(model.msd.parameters(), lr)\n\n return optimizer", "def write_optimization_options(self):\n\n # set common options\n g = self.f.require_group('optimizationOptions')\n g.attrs['optimizer'] = 0 # IpOpt\n g.attrs['retryOptimization'] = 1\n g.attrs['hierarchicalOptimization'] = 1\n g.attrs['numStarts'] = 1\n\n # set IpOpt options\n g = self.f.require_group('optimizationOptions/ipopt')\n g.attrs['max_iter'] = 100\n g.attrs['hessian_approximation'] = np.string_(\"limited-memory\")\n g.attrs[\"limited_memory_update_type\"] = np.string_(\"bfgs\")\n 
g.attrs[\"tol\"] = 1e-9\n g.attrs[\"acceptable_iter\"] = 1\n # set ridiculously high, so only the acceptable_* options below matter\n g.attrs[\"acceptable_tol\"] = 1e20\n g.attrs[\"acceptable_obj_change_tol\"] = 1e-12\n g.attrs[\"watchdog_shortened_iter_trigger\"] = 0\n\n # set fmincon options\n g = self.f.require_group('optimizationOptions/fmincon')\n g.attrs['MaxIter'] = 100\n g.attrs[\"TolX\"] = 1e-8\n g.attrs[\"TolFun\"] = 0\n g.attrs[\"MaxFunEvals\"] = 1e7\n g.attrs[\"algorithm\"] = np.string_(\"interior-point\")\n g.attrs[\"GradObj\"] = np.string_(\"on\")\n g.attrs[\"display\"] = np.string_(\"iter\")\n\n # set CERES options\n g = self.f.require_group('optimizationOptions/ceres')\n g.attrs['max_num_iterations'] = 100\n\n # set toms611/SUMSL options\n g = self.f.require_group('optimizationOptions/toms611')\n g.attrs['mxfcal'] = 1e8\n\n self.write_bounds()\n self.write_starting_points()", "def get_mo_options_from_cfg(\n deploy_cfg: mmengine.Config) -> ModelOptimizerOptions:\n backend_config = get_backend_config(deploy_cfg)\n mo_options = backend_config.get('mo_options', None)\n mo_options = ModelOptimizerOptions(mo_options)\n return mo_options", "def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n energy_predictor_filter_size=args.energy_predictor_filter_size,\n 
p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)", "def _options(self):\n return", "def default_optimization_hparams() -> Dict[str, Any]:\n return {\n \"optimizer\": {\n \"type\": \"Adam\",\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n # TODO(zhiting): allow module-level control of gradient_multipliers\n \"name\": None\n }", "def configure_optimizers(self):\n optimizer = _get_optimizer(model_parameters=self.parameters(\n ), project_parameters=self.project_parameters)\n if self.project_parameters.step_size > 0:\n lr_scheduler = _get_lr_scheduler(\n project_parameters=self.project_parameters, optimizer=optimizer)\n return [optimizer], [lr_scheduler]\n else:\n return optimizer", "def get_model_args(args):\r\n global MODEL_ARCHITECTURE, MODEL_OPTIMIZER, ADVANCED_OPTIONS, \\\r\n DATA_OPTIONS, BERT_CONFIG\r\n\r\n required_args = MODEL_ARCHITECTURE | MODEL_OPTIMIZER | ADVANCED_OPTIONS \\\r\n | DATA_OPTIONS | BERT_CONFIG\r\n\r\n arg_values = {k: v for k, v in vars(args).items() if k in required_args}\r\n return argparse.Namespace(**arg_values)", "def gyp_generator_flags():\n return dict(arg.split('=', 1)\n for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)", "def parse_opts():\n MODELS = core.list_models()\n flags = [arg for arg in sys.argv[1:]\n if arg.startswith('-')]\n values = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' in arg]\n args = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' not in arg]\n models = \"\\n \".join(\"%-15s\"%v for v in MODELS)\n if len(args) == 0:\n print(USAGE)\n print(\"\\nAvailable models:\")\n print(columnize(MODELS, indent=\" \"))\n sys.exit(1)\n if len(args) > 3:\n print(\"expected parameters: model N1 N2\")\n\n name = args[0]\n try:\n model_info = core.load_model_info(name)\n except ImportError as exc:\n print(str(exc))\n print(\"Could not find model; use one of:\\n \" + models)\n sys.exit(1)\n\n invalid = [o[1:] for o in flags\n if o[1:] not in NAME_OPTIONS\n and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]\n if invalid:\n print(\"Invalid options: %s\"%(\", \".join(invalid)))\n sys.exit(1)\n\n\n # pylint: disable=bad-whitespace\n # Interpret the flags\n opts = {\n 'plot' : True,\n 'view' : 'log',\n 'is2d' : False,\n 'qmax' : 0.05,\n 'nq' : 128,\n 'res' : 0.0,\n 'accuracy' : 'Low',\n 'cutoff' : 0.0,\n 'seed' : -1, # default to preset\n 'mono' : False,\n 'show_pars' : False,\n 'show_hist' : False,\n 'rel_err' : True,\n 'explore' : False,\n 'use_demo' : True,\n 'zero' : False,\n }\n engines = []\n for arg in flags:\n if arg == '-noplot': opts['plot'] = False\n elif arg == '-plot': opts['plot'] = True\n elif arg == '-linear': opts['view'] = 'linear'\n elif arg == '-log': opts['view'] = 'log'\n elif arg == '-q4': opts['view'] = 'q4'\n elif arg == '-1d': opts['is2d'] = False\n elif arg == '-2d': opts['is2d'] = True\n elif arg == '-exq': opts['qmax'] = 
10.0\n elif arg == '-highq': opts['qmax'] = 1.0\n elif arg == '-midq': opts['qmax'] = 0.2\n elif arg == '-lowq': opts['qmax'] = 0.05\n elif arg == '-zero': opts['zero'] = True\n elif arg.startswith('-nq='): opts['nq'] = int(arg[4:])\n elif arg.startswith('-res='): opts['res'] = float(arg[5:])\n elif arg.startswith('-accuracy='): opts['accuracy'] = arg[10:]\n elif arg.startswith('-cutoff='): opts['cutoff'] = float(arg[8:])\n elif arg.startswith('-random='): opts['seed'] = int(arg[8:])\n elif arg == '-random': opts['seed'] = np.random.randint(1e6)\n elif arg == '-preset': opts['seed'] = -1\n elif arg == '-mono': opts['mono'] = True\n elif arg == '-poly': opts['mono'] = False\n elif arg == '-pars': opts['show_pars'] = True\n elif arg == '-nopars': opts['show_pars'] = False\n elif arg == '-hist': opts['show_hist'] = True\n elif arg == '-nohist': opts['show_hist'] = False\n elif arg == '-rel': opts['rel_err'] = True\n elif arg == '-abs': opts['rel_err'] = False\n elif arg == '-half': engines.append(arg[1:])\n elif arg == '-fast': engines.append(arg[1:])\n elif arg == '-single': engines.append(arg[1:])\n elif arg == '-double': engines.append(arg[1:])\n elif arg == '-single!': engines.append(arg[1:])\n elif arg == '-double!': engines.append(arg[1:])\n elif arg == '-quad!': engines.append(arg[1:])\n elif arg == '-sasview': engines.append(arg[1:])\n elif arg == '-edit': opts['explore'] = True\n elif arg == '-demo': opts['use_demo'] = True\n elif arg == '-default': opts['use_demo'] = False\n # pylint: enable=bad-whitespace\n\n if len(engines) == 0:\n engines.extend(['single', 'sasview'])\n elif len(engines) == 1:\n if engines[0][0] != 'sasview':\n engines.append('sasview')\n else:\n engines.append('single')\n elif len(engines) > 2:\n del engines[2:]\n\n n1 = int(args[1]) if len(args) > 1 else 1\n n2 = int(args[2]) if len(args) > 2 else 1\n use_sasview = any(engine=='sasview' and count>0\n for engine, count in zip(engines, [n1, n2]))\n\n # Get demo parameters from model definition, or use default parameters\n # if model does not define demo parameters\n pars = get_pars(model_info, opts['use_demo'])\n\n\n # Fill in parameters given on the command line\n presets = {}\n for arg in values:\n k, v = arg.split('=', 1)\n if k not in pars:\n # extract base name without polydispersity info\n s = set(p.split('_pd')[0] for p in pars)\n print(\"%r invalid; parameters are: %s\"%(k, \", \".join(sorted(s))))\n sys.exit(1)\n presets[k] = float(v) if not k.endswith('type') else v\n\n # randomize parameters\n #pars.update(set_pars) # set value before random to control range\n if opts['seed'] > -1:\n pars = randomize_pars(pars, seed=opts['seed'])\n print(\"Randomize using -random=%i\"%opts['seed'])\n if opts['mono']:\n pars = suppress_pd(pars)\n pars.update(presets) # set value after random to control value\n #import pprint; pprint.pprint(model_info)\n constrain_pars(model_info, pars)\n if use_sasview:\n constrain_new_to_old(model_info, pars)\n if opts['show_pars']:\n print(str(parlist(model_info, pars, opts['is2d'])))\n\n # Create the computational engines\n data, _ = make_data(opts)\n if n1:\n base = make_engine(model_info, data, engines[0], opts['cutoff'])\n else:\n base = None\n if n2:\n comp = make_engine(model_info, data, engines[1], opts['cutoff'])\n else:\n comp = None\n\n # pylint: disable=bad-whitespace\n # Remember it all\n opts.update({\n 'name' : name,\n 'def' : model_info,\n 'n1' : n1,\n 'n2' : n2,\n 'presets' : presets,\n 'pars' : pars,\n 'data' : data,\n 'engines' : [base, comp],\n })\n # pylint: 
enable=bad-whitespace\n\n return opts", "def modify_model_commandline_options(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n # module\n parser.add_argument('--discriminator_module_name', type=str, required=True, choices=discriminator_modules.keys())\n parser.add_argument('--generator_module_name', type=str, required=True, choices=generator_modules.keys())\n opt, _ = parser.parse_known_args()\n discriminator_module_modify_commandline_options = discriminator_module_options[opt.discriminator_module_name]\n generator_module_modify_commandline_options = generator_module_options[opt.generator_module_name]\n parser = discriminator_module_modify_commandline_options(parser)\n parser = generator_module_modify_commandline_options(parser)\n\n # optimizer\n parser.add_argument('--discriminator_optimizer_name', type=str, required=True, choices=optimizers.keys())\n parser.add_argument('--generator_optimizer_name', type=str, required=True, choices=optimizers.keys())\n opt, _ = parser.parse_known_args()\n discriminator_optimizer_modify_commandline_options = optimizer_options[opt.discriminator_optimizer_name]\n generator_optimizer_modify_commandline_options = optimizer_options[opt.generator_optimizer_name]\n parser = discriminator_optimizer_modify_commandline_options(parser)\n parser = generator_optimizer_modify_commandline_options(parser)\n\n # scheduler\n parser.add_argument('--discriminator_scheduler_name', type=str, required=True, choices=schedulers.keys())\n parser.add_argument('--generator_scheduler_name', type=str, required=True, choices=schedulers.keys())\n opt, _ = parser.parse_known_args()\n discriminator_scheduler_modify_commandline_options = scheduler_options[opt.discriminator_scheduler_name]\n generator_scheduler_modify_commandline_options = scheduler_options[opt.generator_scheduler_name]\n parser = discriminator_scheduler_modify_commandline_options(parser)\n parser = generator_scheduler_modify_commandline_options(parser)\n\n # init weight\n parser.add_argument('--init_weight_name', type=str, required=True, choices=init_weights.keys())\n opt, _ = parser.parse_known_args()\n init_weight_modify_commandline_options = init_weight_options[opt.init_weight_name]\n parser = init_weight_modify_commandline_options(parser)\n\n return parser", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,\n ('self',), options)", "def get_adv_optimizer(self, mode: str) -> torch.optim.Optimizer:\n pass" ]
[ "0.6497855", "0.6085382", "0.57732934", "0.56216425", "0.55810773", "0.5563721", "0.54467785", "0.54459274", "0.54262084", "0.54096705", "0.53853124", "0.5346946", "0.5331329", "0.53296965", "0.53257495", "0.5312863", "0.5310399", "0.52858835", "0.52766055", "0.52655095", "0.5258684", "0.5227333", "0.5224458", "0.52107024", "0.5205455", "0.5203974", "0.5193468", "0.51817644", "0.51801366", "0.51705194" ]
0.6829646
0
Return the hash digest as a bytes object. This is the bigendian representation of the value returned by ``intdigest()`` and is equivalent to the output of the ``XXH64_canonicalFromHash()`` function in the `reference implementation`_ applied to the value returned by ``intdigest()``.
def digest(self): # For discussion of big-endian vs little-endian for the hash # digest of XXHASH algorithms, see # https://github.com/Cyan4973/xxHash/issues/45 return struct.pack(">Q", self.intdigest())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexdigest(self):\n return self.hashObject.hexdigest()", "def digest(self):\n return self._hash", "def hash(self) -> bytes:", "def digest(self):\n return digest_tools.sha256_digest(self._payload.as_encoded_str())", "def hash(self):\n return Hash.dhash(bytes(self))", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def _digest(self):\n return self._hasher.hexdigest()", "def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def digest(self) -> bytes:\n # items in data MUST be byte-like objects\n data = []\n\n for key, value in self.items():\n data.append(key)\n if value is not None:\n data.append(value)\n\n return hashlib.sha3_256(b'|'.join(data)).digest()", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def incore_digest(self):\n return hasher(self.content).hexdigest()", "def digest(self):\r\n\r\n H0 = self.H0\r\n H1 = self.H1\r\n H2 = self.H2\r\n H3 = self.H3\r\n H4 = self.H4\r\n inputdata = [] + self.inputdata\r\n count = [] + self.count\r\n\r\n index = (self.count[1] >> 3) & 0x3fL\r\n\r\n if index < 56:\r\n padLen = 56 - index\r\n else:\r\n padLen = 120 - index\r\n\r\n padding = ['\\200'] + ['\\000'] * 63\r\n self.update(padding[:padLen])\r\n\r\n # Append length (before padding).\r\n bits = _sha_bytelist2longBigEndian(self.inputdata[:56]) + count\r\n\r\n self._transform(bits)\r\n\r\n # Store state in digest.\r\n digest = _sha_long2bytesBigEndian(self.H0, 4) + \\\r\n _sha_long2bytesBigEndian(self.H1, 4) + \\\r\n _sha_long2bytesBigEndian(self.H2, 4) + \\\r\n _sha_long2bytesBigEndian(self.H3, 4) + \\\r\n _sha_long2bytesBigEndian(self.H4, 4)\r\n\r\n self.H0 = H0 \r\n self.H1 = H1 \r\n self.H2 = H2\r\n self.H3 = 
H3\r\n self.H4 = H4\r\n self.inputdata = inputdata \r\n self.count = count \r\n\r\n return digest", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def hash(self):\n return self._hash", "def sign_hash(self, private_key, hash_id, digest):\n d_digest = Data(digest)\n signature = Buffer(self.signature_len(private_key=private_key))\n status = self._lib_vscf_ecc.vscf_ecc_sign_hash(self.ctx, private_key.c_impl, hash_id, d_digest.data, signature.c_buffer)\n VscfStatus.handle_status(status)\n return signature.get_bytes()", "def encoded_hash(sha):\n return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')" ]
[ "0.69930434", "0.69264513", "0.67882836", "0.6712462", "0.66841334", "0.66802466", "0.6673555", "0.6600667", "0.6553306", "0.6533125", "0.6499518", "0.6499283", "0.6481292", "0.6456057", "0.6456057", "0.64141536", "0.6401427", "0.6399107", "0.638602", "0.63651985", "0.62907857", "0.6269256", "0.626924", "0.6265182", "0.6249195", "0.62076014", "0.62076014", "0.6181992", "0.61363965", "0.61119395" ]
0.78258497
0
Return the hash digest as a string of hexadecimal digits. This is the value returned by ``digest()`` expressed as a printable hex string for easy display.
def hexdigest(self): # bytes.hex() is simpler, but not available For Python <= 3.4 return "".join("{0:0>2x}".format(b) for b in self.digest())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hexdigest(self):\n return self.hashObject.hexdigest()", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def digest(self):\n # For discussion of big-endian vs little-endian for the hash\n # digest of XXHASH algorithms, see\n # https://github.com/Cyan4973/xxHash/issues/45\n return struct.pack(\">Q\", self.intdigest())", "def __str__(self: Hash) -> str:\n return self.to_hex()", "def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()", "def digest(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"digest\", _args)\n return _ctx.execute_sync(str)", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def _digest(self):\n return self._hasher.hexdigest()", "def digest(self):\n return self._hash", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def as_hex(self):\n return binascii.hexlify(self.as_bytes()).decode('ascii')", "def toHex(self):\r\n rgb = self.toRGB()\r\n return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],\r\n hex(rgb[2])[2:])).replace(' ', '0')", "def printsha(self):\n print(self.sha256.hex())", "def digest(self, seq):\n\n h = hashlib.new(self._hash_algorithm)\n h.update(seq)\n dig = h.hexdigest()\n\n return dig", "def createHashcodeString(digest):\n map_num2hex = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n hashcodelist = [None] * len(digest)\n \n for i1 in range(0, len(digest)):\n digest_i = digest[i1] # Extracts the number from the digest.\n hashcodelist[i1] = map_num2hex[digest_i] # Turns the number to a hex value and assigns it to the hashcodelist.\n \n hashcodestring = \"\"\n \n for i1 in range(0, len(hashcodelist)):\n hashcodestring = hashcodestring + hashcodelist[i1] # Appends the characters to form a string.\n \n return hashcodestring", "def hash_string(self):\n return self._hash_string", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def as_hex(self, *, align='left'):\n return self.as_bytes(align=align).hex()", "def fingerprint(self) -> str:\n fp = self.sha256.hex()\n return fp", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def hash_str(self):\n return '___'.join([self.key.kind(), self.key.string_id(),\n self._Hash()])", "def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])", "def to_h(self):\n return str(self).encode('hex')", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def hex_str (self):\n return 
\"#%02X%02X%02X\"%(self.r, self.g, self.b)" ]
[ "0.8446957", "0.8241606", "0.8034233", "0.7728154", "0.7493876", "0.72516394", "0.7215683", "0.70375633", "0.6966864", "0.69554013", "0.6895401", "0.6893466", "0.6879853", "0.6794466", "0.6791125", "0.6765434", "0.67632526", "0.67488396", "0.6718266", "0.6668175", "0.6663794", "0.66539264", "0.6644642", "0.6637669", "0.6594874", "0.6577905", "0.6569104", "0.6555183", "0.6544478", "0.65430665" ]
0.8364801
1
Check if the specified instance matches the service's model.
def _isinstance(self, instance, raise_error=True): if isinstance(instance, self.__model__): return True elif raise_error: raise ValueError('{} is not of type {}.'.format( instance, self.__model__, )) else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkModel(self, model):\n # TODO", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def have_this_instance(self, instance):\n for i in self.all_instances:\n if i == instance:\n print(\"YES ITS ME!\")\n return True\n print(\"NO S.B. ELSE\")\n return False", "def test_instance_BaseModel(self):\n self.assertTrue(isinstance(self.my_object, BaseModel))", "def hasModel(self, model):\n if model in self.models:\n return S_OK()\n else:\n return S_ERROR(\"Model %s is not defined, use any of %s\" % (model, self.models.keys()))", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def is_model(self):\n return self.model_name() is not None", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['thing']) is pyperry.association.HasOne)", "def test_instance(self):\n b = Review()\n self.assertIsInstance(b, Review)\n self.assertTrue(issubclass(type(b), BaseModel))", "def test_instance(self):\n self.assertIsInstance(self.test1, BaseModel)", "def conforms(self, instance, format):\r\n\r\n try:\r\n self.check(instance, format)\r\n except FormatError:\r\n return False\r\n else:\r\n return True", "def is_valid(self, data_model: DataModel) -> bool:", "def __eq__(self, other):\n if not isinstance(other, LookmlModel):\n return False\n\n return self.__dict__ == other.__dict__", "def match(self, cls):\n return isinstance(self, cls)", "def is_for(self, model_type: str, version: Version):\n return model_type == self.model_type and version in self.version_spec", "def instance_valid(instance):\n return zope.interface.verify.verifyObject(IKeyValueDB, instance)", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def __contains__(self, instance: object) -> bool:\n try:\n state = attributes.instance_state(instance)\n except exc.NO_STATE as err:\n raise exc.UnmappedInstanceError(instance) from err\n return self._contains_state(state)", "def check_model(expected_model, actual_model):\n assert (expected_model == actual_model), \\\n \"Not Compare model: Expected model:\\n {0}\\nActual model:\\n {1}\".format(expected_model, actual_model)", "def test_instance_equality(self):\n class EqualityModel(Model):\n pk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel(pk=0)\n m1 = EqualityModel(pk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def __eq__(self, other):\n if not isinstance(other, ServerModel):\n return False\n\n return self.__dict__ == other.__dict__", "def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False", "def model_is_valid(self, model: OscalBaseModel) -> bool:\n oscal_version = model.metadata.oscal_version.__root__\n p = re.compile(OSCAL_VERSION_REGEX)\n matched = p.match(oscal_version)\n return matched is not None", "def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv", "def __eq__(self, other):\n if not isinstance(other, Service):\n return False\n\n return self.__dict__ == other.__dict__", "def 
test_instance_equality(self):\r\n class EqualityModel(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel(pk=0)\r\n m1 = EqualityModel(pk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def is_instance(self,instance):\n\t\tinst_attributes = instance.getAttributes()\n\t\tfor attribute in self.utility.av_counts.keys():\n\t\t\tif attribute not in inst_attributes:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tfor value in self.utility.av_counts[attribute]:\n\t\t\t\t\tif (self.utility.av_counts[attribute][value] / self.utility.count) != 1.0:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tif inst_attributes[attribute] != value:\n\t\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t\tif inst_attributes[attribute] != self.utility.av_counts[attribute]['numerically_valued_attribute'] / self.utility.count:\n\t\t\t\t\t\treturn False\n\t\t\n\t\tfor attribute in instance:\n\t\t\tif attribute not in self.utility.av_counts:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tif inst_attributes[attribute] not in self.utility.av_counts[attribute]:\n\t\t\t\t\treturn False\n\t\t\t\tif ((self.utility.av_counts[attribute][inst_attributes[attribute]] / self.utility.count) != 1.0):\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif len(self.utility.av_counts[attribute].keys()) != 1 or self.utility.av_counts[attribute].get('numerically_valued_attribute', 0) == 0:\n\t\t\t\t\treturn False\n\t\t\n\t\treturn True", "def is_instance(instance, expected_types):\n for expected_type in expected_types:\n if isinstance(instance, expected_type):\n return True\n\n return False", "def is_peewee_model(obj) -> bool:\n return (inspect.isclass(obj) and\n issubclass(obj, peewee.Model) and\n not obj == peewee.Model and\n not obj.__name__.startswith('_'))" ]
[ "0.6658003", "0.6365422", "0.63516694", "0.59845364", "0.59756815", "0.59461135", "0.5938544", "0.59184885", "0.5915191", "0.59148186", "0.5906111", "0.59015507", "0.5884941", "0.585603", "0.5847129", "0.58132994", "0.57774276", "0.57701457", "0.5730501", "0.57222885", "0.57215434", "0.57187676", "0.5705217", "0.5691113", "0.56890965", "0.5675528", "0.56570476", "0.5643482", "0.56290567", "0.5621421" ]
0.650044
1
Converts the provided integer 'n' into a valid insertion point in the string 's', i.e. one of the current index locations or the position at the end
def gen_index_via_mod(s, n): if len(s) == 0: return 0 return n % (len(s) + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n \r\n\r\n\r\n if pos_1 <10:\r\n pos_1=\"00\"+str(pos_1)\r\n elif pos_1<100:\r\n pos_1=\"0\"+str(pos_1)\r\n\r\n\r\n\r\n\r\n #pos\r\n c=\"\"\r\n\r\n c=str(pos_0)+str(pos_1)+str(n)\r\n #print(\"c\",c)\r\n return c", "def InfIntToStr(s, i, n):\n if i == len(s):\n return \"\"\n elif i == 0:\n return str(int(s[i])) + InfIntToStr(s, i + 1, n)\n else:\n return str(int(s[i])).zfill(n) + InfIntToStr(s, i + 1, n)", "def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)", "def esrever2(n, s):\n if n == 0:\n return s\n else:\n result = esrever2(n // 10, s * 10 + n % 10)\n return result", "def d(s):\n return s + 1", "def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t", "def cmd_n(self,s):\n length = 0\n node = self.start\n while node is not None:\n line = node.element\n length += len(line)\n if line.find(s):\n self.cursor = node\n self.delta = line.find(s)\n break\n node = node.next\n self.get_text()", "def stoi(self, s):\n idx = self._stoi.get(s)\n return idx + 2 if idx else self.unk_idx", "def f(n):\n\tnstr = ''\n\tfor i in range(1, n + 1):\n\t\tnstr = nstr + str(i)\n\treturn nstr", "def fn(i, s=\"\", n=0):\n if i == len(word): return ans.append(s + (str(n) if n else \"\"))\n fn(i+1, s, n+1)\n fn(i+1, s + (str(n) if n else \"\") + word[i], 0)", "def insertnln(n=1):\r\n\tidx = 0\r\n\twhile idx < n:\r\n\t\tCONSOLE.insertln()\r\n\t\tidx = idx + 1", "def left_fill(s, n, x=\"0\"):\n sl = len(s)\n zn = n - sl\n if zn > 0:\n return zn*\"0\" + s\n else:\n return s", "def splitevery(s, n):\n\treturn [s[x:x+n] for x in range(0,len(s), n)]", "def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.word\n string += \"\\n\" + self.children[0].get_string(n + 1)\n string += \"\\n\" + self.children[1].get_string(n + 1)\n return string", "def shift_column(code, n, s):\n def shift(s, n):\n if n == 0 or len(s) == 1:\n return s\n else:\n return shift(s[-1] + s[:-1], n-1)\n\n if type(code) is not list:\n return code\n else:\n n = int(n)\n s = int(s) % len(code)\n if s > 0 and n < len(code[0]):\n column = select_column(code, n)\n column = shift(column, s)\n for i in range(0, len(column)):\n new = list(code[i])\n new[n] = column[i]\n code[i] = ''.join(new)\n return code\n else:\n return code", "def progress_string(i, n):\n width = len(str(n))\n string = \"({0:{width}d}/{1:d})\".format(i, n, width=width)\n return string", "def cd2p(s, N):\n letter = s[0].upper()\n number = s[1:]\n col = letter_coord.index(letter) + 1\n row = (N + 1) - int(number)\n # print('row:{} col:{}'.format(row,col))\n return col + (N + 1) * row", "def line(n, str):\n\n return_value = ''\n for _ in range(n):\n return_value += str\n return return_value", "def expanding(self,pos_0,pos_1,n):\r\n cnvt_front=self.string(pos_0,pos_1,n)\r\n if int(cnvt_front) in self.expanded:\r\n\r\n a=1\r\n else:\r\n self.expanded.append(int(cnvt_front))", "def sindex(string, row, col):\r\n n = 0\r\n for _ in range(row-1):\r\n n = string.find('\\n', n) + 1\r\n return n+col-1", "def generateParenthesis(self, n):\n sol = []\n \n def dfs(cur_str, o, c):\n if o==n and o==c:\n 
sol.append(cur_str)\n else:\n if o < n:\n dfs(cur_str + \"(\", o + 1, c)\n if c < o:\n dfs(cur_str + \")\", o, c+1)\n dfs(\"\", 0, 0)\n return sol", "def str_fill(i, n):\r\n return str(i).zfill(n)", "def recurse(n, s):\n print(f\"recurse n -> {n}\")\n print(f\"recurse s -> {s}\")\n if n == 0:\n print(s)\n else:\n recurse(n-1, n+s)", "def encode1(s,n):\n r = \"\"\n for l in s:\n l = ord(l) # convert to ascii\n l = l - 97 # 'a' is 97 so we want to reduce so 'a'=0 'b'=1 etc\n l = l + n # add the offset\n l=l%26 # use mod so that we wrap around back to 'a' if we go past 'z'\n l=l+97 # and add back the 97\n r = r + chr(l)\n return r", "def fo_shizzle_my_nizzle(n): \n if n < 0:\n n = \"fo\"\n elif n >= 1 and n < 50: \n n = \"shizzle\"\n elif n >= 50 and n <= 100:\n n = \"my\"\n elif n % 2 == 0 and n % 3 == 0 and n > 100:\n n = \"nizzle\"\n else:\n n = \"\"\n return n", "def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]", "def hex_string(s, n=32):\n # take first n characters, reverse them and get ascii codes with ord()\n return 'X\"{0:>0{1}}\"'.format(''.join(['{0:x}'.format(ord(c)) for c in s[:n][::-1]]), n * 2)", "def fn(i, n):\n if not (n <= len(s)-i <= 3*n): return \n if i == len(s): return ans.append(\".\".join(stack))\n k = i+1 if s[i] == \"0\" else i+3\n for j in range(i+1, min(k, len(s))+1): \n if j == i+3 and s[i:j] > \"255\": continue\n stack.append(s[i:j])\n fn(j, n-1)\n stack.pop()" ]
[ "0.63267386", "0.6178822", "0.61630845", "0.6155788", "0.6115236", "0.6103694", "0.6012176", "0.6011193", "0.594181", "0.593987", "0.5923727", "0.5884271", "0.580508", "0.5737656", "0.5725076", "0.5623249", "0.5594806", "0.55564487", "0.5525483", "0.55181396", "0.5496168", "0.54950446", "0.5493407", "0.54928535", "0.5476004", "0.54742867", "0.5470193", "0.54574436", "0.54181755", "0.5409476" ]
0.6202475
1
Gets all announcements on the server
def get(self): announcements = Announcement.query.all() announcements = announcements_schema.dump(announcements) if not announcements: return {'status': 'success', 'announcements': announcements}, 206 # Partial Content Served return {'status': 'success', 'announcements': announcements}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getAnnouncements(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getAnnouncements()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getAnnouncements\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getAnnouncements\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/announcements\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def get_announcements(self, factory: 'AnnouncementFactory') -> 'AnnouncementCollection':\n collection = factory.get_announcement_collection(self.get_announcement_data_list())\n return collection", "def announce(self):\n m = rtorrent9.rpc.Multicall(self)\n self.multicall_add(m, \"d.tracker_announce\")\n\n return m.call()[-1]", "def get(self):\n return {'status': 'success', 'count': Announcement.query.count()}, 200", "def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def get(self, request):\n announcement_id = request.GET.get(\"id\")\n if announcement_id:\n try:\n announcement = Announcement.objects.get(id=announcement_id)\n return self.success(AnnouncementSerializer(announcement).data)\n except Announcement.DoesNotExist:\n return self.error(\"Announcement does not exist\")\n announcement = Announcement.objects.all().order_by(\"-create_time\")\n if request.GET.get(\"visible\") == \"true\":\n announcement = announcement.filter(visible=True)\n return self.success(self.paginate_data(request, announcement, AnnouncementSerializer))", "def getall():\n elements = Advertisements().get_all_elements()\n data = jsonify(elements)\n data.statut_code = 200\n return data", "def issuelinks_all(request, format=None):\n if request.method == 'GET':\n issuelinks = IssueLink.objects.all()\n serializer = IssueLinkSerializer(issuelinks, many=True)\n return Response(serializer.data)", "def list_all_agencies():\n return JsonResponse.create(StatusCode.OK, get_all_agencies())", "def all(self) -> list[dict[str, Any]]:\n return self.client.get(self._url())", "def api_all():\n all_mail = mail_dao.get_all()\n return _create_response(all_mail)", "def get_all(self):\n return self.__fetcher.get_fetched()", "def get_all(self):\n\n servers = 
self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)", "def get_articles(self, publish_status):\n query_str = (\n \"SELECT Id,KnowledgeArticleId,Title,UrlName FROM {} \"\n \"WHERE PublishStatus='{}' AND language='en_US'\"\n ).format(\n settings.SALESFORCE_ARTICLE_TYPE,\n publish_status,\n )\n result = self.api.query(query_str)\n return result['records']", "def all_entries(cls):\n info = Diary.entries\n response = jsonify({\"data\": info})\n response.status_code = 200\n return response", "def list():\r\n articles = []\r\n if request.method == 'GET':\r\n # Get all articles\r\n response = table.scan()\r\n articles = response.get('Items')\r\n\r\n return render_template('article/articles.html', articles=articles, title='List Articles')", "async def get_all(request):\n pass", "def list(self):\n return self.connection.get(self.service)", "def get_all_podcasts():\r\n return [Podcast.podcast_json(podcast) for podcast in Podcast.query.all()]", "def list(limit, export):\n GetArticles.get_all_articles(limit, export)", "def all(self):\n return self.client.request_with_method(Methods.LIST % self.name)['items']", "def fetch(self):\n\n entries = []\n for activity in self.activities[\"entries\"]:\n entries.append(\n [\n element\n for element in [activity[\"title\"], activity[\"content\"][0][\"value\"]]\n ]\n )\n\n return entries[0 : self.max_entries]", "async def getofficialnews(self, appID: int = None) -> typing.List:\n appID = appID if appID is not None else self.appID\n\n news = await SteamNewsPost.asyncgetnewsforapp(\n appID=appID, count=15, maxlength=600\n )\n logging.info(f\"{len(news)} {self._parsername} post(s) returned by Steam's API\")\n officialnews = [\n item for item in news if self.RLnewsfilter(item, self.psyonixstaff)\n ]\n\n logging.info(f\"Found {len(officialnews)} official {self._parsername} post(s)\")\n return officialnews", "def get_articles(db:Session):\n return db.query(ArticleModel).all()", "async def get_all_investigators(request):\n client_key = general.get_request_key_header(request)\n investigator_list = await security_messaging.get_investigators(request.app.config.VAL_CONN, client_key)\n\n investigator_list_json = []\n for address, dp in investigator_list.items():\n investigator_list_json.append({\n 'public_key': dp.public_key,\n 'name': dp.name\n })\n return response.json(body={'data': investigator_list_json},\n headers=general.get_response_headers())", "def get_all_content(self):\n return self._get_all_content()", "def get():\n all_finished_anime = AnimeViewed.query.all()\n list_anime_viewed = []\n\n for anime_viewed in all_finished_anime:\n list_anime_viewed.append(anime_viewed.to_dict())\n\n return make_response(jsonify(list_anime_viewed), 200)" ]
[ "0.67929053", "0.6228621", "0.61567163", "0.6098976", "0.5930889", "0.59122926", "0.58568746", "0.5851314", "0.58407116", "0.5822206", "0.5804673", "0.5770579", "0.57031876", "0.56998605", "0.5559147", "0.5553164", "0.5529305", "0.55254424", "0.55103004", "0.5502346", "0.5491109", "0.54636586", "0.54524744", "0.5426534", "0.5419024", "0.5413854", "0.5413613", "0.53921217", "0.5376735", "0.5372632" ]
0.6474763
1
Delete an announcement by ID
def delete(self, announcementID): announcement = Announcement.query.filter_by(announcementID=announcementID) if not announcement.first(): return {'status': 'fail', 'message': 'No announcement with ID ' + str(announcementID) + ' exists'}, 404 announcement.delete() db.session.commit() return {'status': 'sucess', 'message': 'Announcement Deleted'}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _id):", "def delete(self, id):\n raise NotImplementedError", "def delete(self,id):\r\n return delete(id=id)", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def delete(self, id):\n return delete_msg(id)", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def delete_by_id(self, subject_id: str) -> any:\n pass", "def delete(self, cls, id):\n pass", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.remove()\n return {'msg':'Academic achievement deleted.'}, 200\n\n except Exception as e:\n print(e)\n return {'msg':'Could not delete academic achievement.'}, 500", "def delete(id):\n elementFromDB = Advertisements().get_one_element(id)\n if elementFromDB is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n try:\n elements = Advertisements().delete_element(id)\n result = jsonify(elements)\n result.statut_code = 200\n return result\n except Exception as identifier:\n return abort(500, identifier)", "def delete(cls, id):\n raise Exception('Not Implemented Yet')", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete_entry(self, id, **args):\n args.update(id=id)\n return self.fetch(\"/entry/delete\", post_args=args)", "def delete():", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def delete(self, id):\n r = validate_get(id)\n tareaID = r.tarea.id\n r.destroySelf()\n flash(_(u'El %s fue eliminado permanentemente.') % name)\n raise redirect('../list/%d' % tareaID)", "def delete(id_patient: str):\n database = get_connection()\n col = database.patients\n query = {\"patient_data.id\": id_patient}\n col.delete_one(query)", "def delete(self, id):\n try:\n deleted_id = self.borrow_repo.remove_one_by_id(id)\n if deleted_id:\n self.write({'id': deleted_id})\n else:\n self.write_not_found(\n 'A request with id {} was not found'.format(id)\n )\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def delete(id):\n get_autor(id)\n try:\n db.insert_bd('DELETE FROM autor WHERE id = %d' % id)\n return redirect(url_for('autor.index'))\n except:\n return render_template('404.html')", "def deleteOne(id):\n print(inspect.stack()[1][3])\n query = Followup.delete().where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to find the given client'}\n return {'status': \"Delete Succesful\"}", "def delete_comment(self, id, **args): \n args.update(id=id)\n return self.fetch(\"/comment/delete\", post_args=args)", "def delete(article_id):\r\n response = table.get_item(\r\n Key={'article_id': article_id}\r\n )\r\n data = 
response.get('Item')\r\n if data is None:\r\n flash('Unable to get Article')\r\n return redirect(url_for('article.list')) \r\n\r\n # Delete article for a particular id\r\n response = table.delete_item(\r\n Key={'article_id':article_id}\r\n )\r\n\r\n if response:\r\n flash('Article is successfully deleted')\r\n\r\n return redirect(url_for('article.list'))" ]
[ "0.7832141", "0.7315442", "0.718373", "0.71590054", "0.715206", "0.7093981", "0.7008346", "0.6998064", "0.6946827", "0.6931493", "0.6922772", "0.68966997", "0.6869891", "0.6861729", "0.6804666", "0.6804666", "0.6804666", "0.6804666", "0.6804666", "0.6717501", "0.6696411", "0.6673575", "0.66603714", "0.66594744", "0.66467136", "0.66382396", "0.66345125", "0.6628265", "0.6592035", "0.6582318" ]
0.79569536
0
Function that converts a category name to a Python module name, e.g. rwgeneric to RwGenericYang
def get_module_name_from_log_category(log_category): words = log_category.split('-') words.append('yang') return ''.join(word.capitalize() for word in words)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_module_name(layer_name):\n modules = layer_name.split('.')\n try:\n idx = modules.index('module')\n except ValueError:\n return layer_name\n del modules[idx]\n return '.'.join(modules)", "def module_name(self):\n return \"py{0:s}\".format(self.library_name[3:])", "def generate_module_name_dir(name, train_type):\n if name == ComponentName.BINNING:\n if train_type == FederatedLearningType.VERTICAL:\n return ComponentName.VERT_FEATURE_BINNING.lower()\n else:\n return ComponentName.HORZ_FEATURE_BINNING.lower()\n elif name == ComponentName.FEATURE_CALCULATION:\n if train_type == FederatedLearningType.VERTICAL:\n return ComponentName.VERT_FEATURE_CALCULATION.lower()\n else:\n raise ValueError(\"The HorzFeatureCalculation Does't Support Yet.\")\n else:\n return name.lower()", "def get_module_short_name(klass):\n return klass.__module__.rsplit('.', 1)[-1]", "def makename(package, module):\n # Both package and module can be None/empty.\n if package:\n name = package\n if module:\n name += '.' + module\n else:\n name = module\n return name", "def getMangledName(self, name, module=None):\n if module is os.path:\n return \"os.path\"\n if isinstance(name, str) and (name.startswith(self.start) or name == self.package):\n return self.prefix + name\n return name", "def get_nuts_category(year):\n if year >= 2016:\n return f\"nuts2_2016\"\n elif year >= 2013:\n return f\"nuts2_2013\"\n elif year >= 2010:\n return f\"nuts2_2010\"\n elif year >= 2006:\n return f\"nuts2_2006\"\n else:\n return f\"nuts2_2003\"\n\n # for t in [2016,2013,2010,2006,2003]:\n # if year >=t:\n # return(f'nuts2_{str(t)}')", "def sanitize_module_name(module_name):\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name", "def normalize_package_name(_s: str) -> str:\n return _s.replace('_', '-').lower()", "def _compression_module_type_to_attr_name(compression_module_type: CompressionModuleType):\n if compression_module_type == CompressionModuleType.FUNCTION_QUANTIZER:\n return \"function_quantizers\"\n if compression_module_type == CompressionModuleType.ACTIVATION_QUANTIZER:\n return \"activation_quantizers\"\n raise RuntimeError(\"Unknown extra module type\")", "def parse_category_label(label: str) -> str:\n return number_first_regex.sub(\n '_',\n space_regex.sub(\n '_',\n label.strip().lower().replace('*', '').replace('(', '').replace(\n ')', '').replace('.', '')))", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))", "def get_ConTextItem_category_string(ci):\n return \"_\".join(ci.category)", "def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"", "def create_importable_name(charm_name):\n return charm_name.replace(\"-\", \"_\")", "def _get_module_name(filename: str) -> str:\n return \".\".join(_get_relative(filename).split(os.path.sep)[2:]).replace(\".pyi\", \"\").replace(\".__init__\", \"\")", "def __create_classname(self, fullname):\n return PACKAGE_NAME + \".\" + fullname", "def category_reducer(category):\n if not \"--\" in category:\n if category in BAD_CATEGORIES:\n return \"Unknown\"\n return category\n\n main, sub = category.split(\"--\")\n\n main = main.strip()\n if main in [\"Science\"]:\n return sub.strip()\n else:\n return main", "def _project_name_to_package_name(project_name):\n return project_name.lower().replace('-', '')", "def create_charm_name_from_importable(charm_name):\n # _ is invalid in charm names, so 
we know it's intended to be '-'\n return charm_name.replace(\"_\", \"-\")", "def denormalize_module_name(parallel_model, normalized_name):\n fully_qualified_name = [mod_name for mod_name, _ in parallel_model.named_modules() if\n normalize_module_name(mod_name) == normalized_name]\n if len(fully_qualified_name) > 0:\n return fully_qualified_name[-1]\n else:\n return normalized_name # Did not find a module with the name <normalized_name>", "def format_category_name(category):\n\n category_words = category.name.rstrip().replace(',', '').replace(\"'\", '').split(\" \")\n return \"-\".join(category_words)", "def to_py_name(cpp_name, entry_type):\r\n if entry_type == 'function':\r\n return cpp_name\r\n first_underscore = cpp_name.find('_')\r\n assert(first_underscore != -1)\r\n return cpp_name[first_underscore + 1:]", "def process_ci_name(name):\n if name == \"Cinder_Jenkins\":\n return 'Jenkins'\n elif name:\n return name.replace('_', ' ')", "def category_part(self) -> str:\n if not self.is_old_style:\n raise ValueError('New identifiers have no category semantics')\n return self.split('/')[0]", "def _type_name(cls, manual_name):\r\n cf_name = ''\r\n if manual_name:\r\n cf_name = manual_name.lower()\r\n else:\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n \r\n cf_name += ccase(cls.__name__)\r\n cf_name = cf_name.lower()\r\n if cls.__use_module_name__:\r\n cf_name = cls.__module__ + '_{}'.format(cf_name)\r\n return cf_name", "def get_full_module_name(o, lower=False):\n if not isinstance(o, type):\n o = o.__class__\n module = o.__module__\n if module is None or module == str.__class__.__module__:\n return o.__name__\n name = module + '.' + o.__name__\n if lower:\n return name.lower()\n else:\n return name", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str = FEATURE_NAME_SUFFIX) -> str:\n key = name.replace(\".\", \"__ludwig_punct_period__\")\n return key + feature_name_suffix", "def _label_for(self, app_mod):\n return app_mod.__name__.rsplit('.',1)[0]", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str=FEATURE_NAME_SUFFIX) ->str:\n key = name.replace('.', '__ludwig_punct_period__')\n return key + feature_name_suffix" ]
[ "0.6348545", "0.62357664", "0.6161529", "0.6005554", "0.59594476", "0.5909393", "0.58015877", "0.576444", "0.5749584", "0.5743478", "0.57170683", "0.57096314", "0.5701094", "0.56758934", "0.5675464", "0.5667408", "0.56373435", "0.5630453", "0.55773044", "0.5576993", "0.55697715", "0.5569567", "0.55563915", "0.5553497", "0.5547277", "0.5545855", "0.55446887", "0.55227077", "0.5520681", "0.5513283" ]
0.7726592
0
Set the log category name to be used.
def set_category(self, category_name): try: module_name = get_module_name_from_log_category(category_name) log_yang_module = importlib.import_module('gi.repository.' + module_name) if not log_yang_module: logger.error("Module %s is not found to be added as log category for %s", module_name, category_name) print("Module %s is not found to be added as log category for %s", module_name, category_name) return for level in RwLogger.level_event_cls_map.values(): if not hasattr(log_yang_module, level): logger.error("Module %s does not have required log notification for %s", module_name, level) print("Module %s does not have required log notification for %s", module_name, level) return self._log_yang_module = log_yang_module self._log_category_name = category_name except Exception as e: logger.exception("Caught error %s when trying to set log category (%s)",repr(e), category_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rider_category_name(self, rider_category_name):\n\n self._rider_category_name = rider_category_name", "def category(self, category: str):\n\n self._category = category", "def set_scribe_category(category):\r\n LogOptions._SCRIBE_CATEGORY = category", "def set_category(self, category):\n\n\t\tif category is not None and not isinstance(category, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: category EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__category = category\n\t\tself.__key_modified['category'] = 1", "def category_name(self):\r\n return conf.lib.clang_getDiagnosticCategoryName(self.category_number)", "def category_name(self):\n return self.category.name", "def set_file_name(self):\n name = 'LogImage'\n name_log_date = time.strftime(\"%Y%m%d\")\n self.name_log = name + name_log_date + '.log'", "def set_category(self, frontmatter):\n gcates = self._global_categories\n cate_name = ''\n segments = self.path.split(os.path.sep)\n if len(segments) > 2:\n cate_name = segments[1].lower()\n else:\n cate_name = 'uncategorized'\n if cate_name not in gcates:\n gcates[cate_name] = Category(name=cate_name, config=self._config)\n this_cate = gcates[cate_name]\n this_cate.notes.append(self)\n this_cate.count += 1\n self.category = this_cate\n\n # for key in frontmatter:\n # if key.strip().lower().startswith('cate'):\n # # public\n # self.category = frontmatter[key]\n # return\n # self.category = 'general'", "def category_names(self, category_names):\n\n self._category_names = category_names", "def _change_category(cls, category):\n time_now = cls.__stop_category()\n with GlobalProvenance() as db:\n cls._category_id = db.insert_category(category, cls._machine_on)\n cls._category = category\n cls._category_time = time_now", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def rename(self, name):\n self._name = name\n self._logger = logging.getLogger(name)\n self._logger.setLevel(self._level)", "def category_name(self):\n try:\n category = self.proto.category.parent\n return f'{category.name} - {self.proto.category.name}'\n except AttributeError:\n return self.proto.category.name", "def get_category_name(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def category(self, category):\n allowed_values = [\"CALLBACK\", \"CALL_RESTRICTION\", \"CALL_RULE\", \"CAMPAIGN\", \"CAMPAIGN_RULE\", \"CONTACT\", \"CONTACT_LIST_FILTER\", \"DNC_LIST\", \"ENTITY_LIMIT\", \"IMPORT_ERROR\", \"MESSAGING_CAMPAIGN\", \"ORGANIZATION_CONFIGURATION\", \"SCHEDULE\"]\n if category.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for category -> \" + category)\n self._category = \"outdated_sdk_version\"\n else:\n self._category = category", "def category(self, category):\n allowed_values = [\"Trace\", \"Verbose\", \"Info\", \"Wait\", \"Highlight\", \"Gap\", \"Alert\", \"Warning\", \"Error\", \"Fatal\", \"Planned\", \"Updated\", \"Finished\", \"Abandoned\"] # noqa: E501\n if category not in allowed_values:\n raise ValueError(\n \"Invalid value for `category` ({0}), must be one of {1}\" # noqa: E501\n .format(category, allowed_values)\n )\n\n self._category = category", "def name(self) -> str:\n return str(self.category.value)", "def local_category(self, local_category: str):\n\n self._local_category = 
local_category", "def scribe_category():\r\n if LogOptions._SCRIBE_CATEGORY is None:\r\n LogOptions._SCRIBE_CATEGORY = app.get_options().twitter_common_log_scribe_category\r\n return LogOptions._SCRIBE_CATEGORY", "def category(self) -> str:\n return pulumi.get(self, \"category\")", "def get_name(self):\n return self.category_name", "def setLogFileName(self, _strLogFileName):\n self.edLogging.setLogFileName(_strLogFileName)", "def get_module_name_from_log_category(log_category):\n words = log_category.split('-')\n words.append('yang')\n return ''.join(word.capitalize() for word in words)", "def __init__(self, category):\n self.category = category\n self.name = \"Filters.document.category('{}')\".format(self.category)", "def rename_cats(self, **mapping):\n if self.is_categorised:\n self.cats = self.cats.rename(columns=mapping)\n else:\n raise NotCategorisedError", "def category(self, category: Category):\n\n self._category = category" ]
[ "0.71033996", "0.656124", "0.6451363", "0.64011025", "0.63785875", "0.6319959", "0.6303118", "0.6279708", "0.6278118", "0.61796457", "0.608429", "0.608429", "0.608429", "0.608429", "0.608429", "0.60362625", "0.6006342", "0.59686434", "0.5840249", "0.57967573", "0.57849914", "0.5779494", "0.5769242", "0.57586586", "0.57558995", "0.57198656", "0.56977445", "0.5682481", "0.56563765", "0.56287086" ]
0.7935372
0
Tests whether ``TextInputStyle`` instance values are all the expected value type.
def test__TextInputStyle__value(): for instance in TextInputStyle.INSTANCES.values(): vampytest.assert_instance(instance.value, TextInputStyle.VALUE_TYPE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__TextInputStyle__name():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True", "def is_text(self):\n return self.value_type in (str, unicode)", "def checkType(self, value):\n pass", "def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False", "def isStringStyle(self, style):\n return style in [QsciLexerJava.DoubleQuotedString,\n QsciLexerJava.SingleQuotedString,\n QsciLexerJava.UnclosedString,\n QsciLexerJava.VerbatimString]", "def isStringStyle(self, style):\n return style in [QsciLexerCSS.DoubleQuotedString,\n QsciLexerCSS.SingleQuotedString]", "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def _validate_value_type(value: Any, expected: Sequence[Type]) -> bool:\n\n for entry in expected:\n if get_origin(entry) is None:\n if type(value) == entry: # pylint: disable=unidiomatic-typecheck\n return True\n continue\n if _validate_value_type(value, get_args(entry)):\n return True\n return False", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def validate(self,value):\r\n return type(value) is self.datatype", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, str) and self._validate_length(value))\n else:\n return False", "def accepts(cls, value: Any) -> bool:\n try:\n cls.convert(value)\n 
return True\n except ValueError:\n return False", "def validGameSettings(self):\n if not isinstance(self.view, GView):\n return False\n if not isinstance(self.input, GInput):\n return False\n validStates = [STATE_INACTIVE, STATE_NEWWAVE, STATE_ACTIVE,\n STATE_PAUSED, STATE_CONTINUE, STATE_COMPLETE]\n if not self.getState() in validStates:\n return False\n if not self.getWave() is None or isinstance(self.getWave(), Wave):\n return False\n if not self.getText() is None or isinstance(self.getText(), GLabel):\n return False\n return True", "def is_input(self):\n # https://html.spec.whatwg.org/multipage/forms.html#category-submit\n if self.style['appearance'] == 'auto' and self.element is not None:\n if self.element.tag in ('button', 'input', 'select', 'textarea'):\n return not isinstance(self, (LineBox, TextBox))\n return False", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def is_valid(self, value) -> 'True|str':\n if self.base_type is not None and not isinstance(value, self.base_type):\n return f'Value {value} is not type of {self.base_type}.'\n return True", "def _check_dtype(self):\n\n # assert valid dtype\n if self.dtype not in PRIMITIVE_TYPES:\n raise ValueError(\"Type '{}' is invalid. Following types are \"\n \"allowed: {}\"\n .format(self.dtype, PRIMITIVE_TYPES.keys()))\n\n # assert valid dtypes for values\n allowed_types = PRIMITIVE_TYPES[self.dtype]\n\n for value in self.values:\n if not isinstance(value, allowed_types):\n raise TypeError(\"Column '{}' has invalud value '{}' with \"\n \"invalid type '{}'. 
Allowed types are: {}.\"\n .format(self.name,\n value,\n type(value),\n allowed_types))", "def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with pytest.raises(InputError):\n _check_input_config({key: test_value})", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def CheckType(self, *args, **kwargs):\n pass", "def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True", "def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes", "def clean_values(cls, cleaned_input, attribute):\n values_input = cleaned_input.get(cls.ATTRIBUTE_VALUES_FIELD)\n attribute_input_type = cleaned_input.get(\"input_type\") or attribute.input_type\n\n if values_input is None:\n return\n\n if (\n values_input\n and attribute_input_type not in AttributeInputType.TYPES_WITH_CHOICES\n ):\n raise ValidationError(\n {\n cls.ATTRIBUTE_VALUES_FIELD: ValidationError(\n \"Values cannot be used with \"\n f\"input type {attribute_input_type}.\",\n code=AttributeErrorCode.INVALID.value,\n )\n }\n )\n\n is_swatch_attr = attribute_input_type == AttributeInputType.SWATCH\n for value_data in values_input:\n cls._validate_value(attribute, value_data, is_swatch_attr)\n\n cls.check_values_are_unique(values_input, attribute)", "def validate(self):\n self._check_type()", "def validate_value(self, value: valueType) -> bool:\n if value is None:\n raise Exception\n return True", "def test_not_blank_validator_valid_value_should_return_true(self):\n for item in self.stdtype_fixtures:\n self.assertTrue(NotBlankValidator(TypeHint(item.get('type')), item.get('valid')))" ]
[ "0.61332947", "0.59603673", "0.5806856", "0.5770944", "0.57419115", "0.5711008", "0.5696463", "0.567288", "0.5661682", "0.5579446", "0.55630475", "0.5551327", "0.55341244", "0.5487145", "0.54515827", "0.5438341", "0.54083705", "0.54033923", "0.5351363", "0.5323914", "0.53112197", "0.5295399", "0.52837473", "0.52836823", "0.5283626", "0.52661467", "0.5265321", "0.5258055", "0.5244037", "0.5240988" ]
0.741814
0
Tests that example.com was in the dashboard.
def test_link_list(self): response = self.client.get('/tests/dashboard/') self.assertEqual(response.status_code, 200) self.assertContains(response, "example.com")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dashboard_page(self):\r\n\r\n result = self.client.get(\"/dashboard\", follow_redirects = True)\r\n self.assertNotIn(b\"Family Ties - Dashboard\", result.data)", "def test_dashboard_is_up(dashboard_address):\n response = requests.get(f\"{dashboard_address}/health\")\n assert response.status_code == 200\n assert response.text == \"ok\"", "def test_landing_page(self):\n # Create a test client\n client = server.app.test_client()\n\n # Use the test client to make requests\n result = client.get('/', follow_redirects=True)\n\n # Compare result.data with assert method\n self.assertIn(b'<p class=\"navbar-text\">Already have an account?</p>', \n result.data)", "def test_show_on_homepage(self) -> None:\n self.assert_show_on_homepage(apps.wakeup.main.Controller)", "def test_dashboard_page_status(self):\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)", "def test_dashboards_v2_show(self):\n pass", "def test_analytics_id(self):\n response = self.client.get(reverse('home'))\n self.assertContains(response, 'MyAwesomeAnalyticsCode')", "def test_visit(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assert response.status_code == 200", "def test_important_page(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n self.assertIn(\"Email\", result.data)", "def test_showing_dietitian_homepage(self):\n\n result = self.client.get(\"/dietitian/1\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Dietitian Dashboard\", result.data)\n\n result = self.client.get(\"/dietitian/2\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def you_should_see_the_dashboard(driver):\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)", "def test_homepage_anon(self):\r\n\r\n with self.client:\r\n response = self.client.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'United States News', response.data)", "def test_dashboards_v2_link(self):\n pass", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n self.get_DashboardPage(\"Server Utilization\")\n \n self.get_DashboardPage(\"Total Server Utilization\")\n \n self.logout()", "def test_dashboard_view(self):\n target_url = url_for('dashboard.dashboard_panel')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_dashboard_has_dashboard_in_title(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Dashboard\").click()\n self.assertTrue('Dashboard' in self.browser.title, 'Dashboard did not have \"Dashboard\" in title')", "def test_visit(self, client, site, content_page):\n response = client.get(content_page.relative_url(site))\n assert response.status_code == 200", "def test_health_check(self):\n self.url = reverse(\"health-check\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_dashboards_v2_request_access(self):\n pass", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", 
follow_redirects=True)\n\n self.assertIn(b\"Dietitian Dashboard\", result.data)", "def test_get_ok(test_case, page):\n with test_case.app.test_client() as c:\n test_case.assertEqual(200, c.get('dashboard/{}'.format(page)).status_code)", "def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))", "def test_dashboard_loads_properly(self):\n response = self.client.get('your_server_ip:8000/auth/login/expense')\n self.assertEqual(response.status_code, 404)", "def test_tenant_external_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.domain.domain)\n self.assertEqual(response.status_code, 200)", "def test_link_registered(self):\n response = self.client.get(reverse('misago:admin:users:accounts:index'))\n\n response = self.client.get(response['location'])\n self.assertContains(response, reverse('misago:admin:users:bans:index'))", "def test_dashboard_bad_urls(self):\n url = reverse('shipping.views.dashboard')\n # Fail\n response = self.client.get(url, dict(av=\"junk\"))\n eq_(response.status_code, 404)\n response = self.client.get(url, dict(ms=\"junk\"))\n eq_(response.status_code, 404)\n\n # to succeed we need sample fixtures\n appver, milestone = self._create_appver_milestone()\n\n # Succeed\n response = self.client.get(url, dict(ms=milestone.code))\n eq_(response.status_code, 200)\n response = self.client.get(url, dict(av=appver.code))\n eq_(response.status_code, 200)", "def test_dashboard(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1995')\n self.assertIn(b'Create a Recipe Category', rv.data)", "def test_redirection(self):\n dashboard_url = reverse('dashboard')\n self.assertRedirects(self.response, dashboard_url)", "def test_login_required_dashboard(self):\r\n response = self.client.get(reverse('dashboard'))\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['Location'], 'http://testserver/accounts/login?next=/dashboard')" ]
[ "0.7439744", "0.71425265", "0.6988231", "0.69753", "0.6952687", "0.69291663", "0.69152224", "0.69117343", "0.68734396", "0.68194866", "0.67331254", "0.673261", "0.6697194", "0.66839606", "0.66659987", "0.66591775", "0.6643598", "0.66313016", "0.6605587", "0.65915424", "0.65859234", "0.6574944", "0.65737844", "0.6572512", "0.6559295", "0.6555671", "0.65385664", "0.64996845", "0.64979285", "0.64962834" ]
0.7536914
0
Tests that the admin list found the User and Group admins
def test_admin_list(self): response = self.client.get('/tests/dashboard/') self.assertEqual(response.status_code, 200) self.assertContains(response, '<a href="/admin/auth/group/">Group</a>', html=True) self.assertContains(response, '<a href="/admin/auth/user/">User</a>', html=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def test_cannot_remove_all_admins(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n users = admin_holder.find('ul', {'class': 'users'}).findAll(\n 'li', {'class': 'deleter'})\n assert len(users) == 1\n r = self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'admin1'})\n assert r.json[\n 'error'] == 'You must have at least one user with the Admin role.'\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n users = admin_holder.find('ul', {'class': 'users'}).findAll(\n 'li', {'class': 'deleter'})\n assert len(users) == 1", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def test_01_admin_index(self):\r\n self.register()\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"There should be an index page for admin users and apps\"\r\n assert \"Settings\" in res.data, err_msg\r\n divs = ['featured-apps', 'users', 'categories', 'users-list']\r\n for div in divs:\r\n err_msg = \"There should be a button for managing %s\" % div\r\n assert dom.find(id=div) is not None, err_msg", "def test_admin_user(self):\n user = self.template_users['staff_user']\n self.client.login(email=user['email'], password=user['password'])\n\n # Admins can see everything\n response = self.client.get(reverse('api:log-list'))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], self.object_count)\n\n # Deletion should be possible\n response = self.client.post(reverse('api:log-erase'), {\n 'before': str(timezone.now()),\n 'max_severity': LogEntry.ERROR,\n })\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['deleted'], self.object_count)\n self.assertEqual(LogEntry.objects.count(), 0)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_admin(self):\n assert(admin)", "def test_users_listed(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n # Assertions\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_number_of_group_admins(self):\n\n group0 = self.test_save(name='group1')\n group1 = self.test_save(name='group2')\n user0 = self.user\n user1 = self.user1\n \n group0.user_set.add(user0)\n group0.user_set.add(user1)\n user0.grant(\"admin\", group0)\n group1.user_set.add(user0)\n group1.user_set.add(user1)\n\n self.assertEqual(number_group_admins(group0), 1)\n self.assertEqual(number_group_admins(group1), 0)\n user1.grant(\"admin\", group1)\n 
self.assertEqual(number_group_admins(group1), 1)\n user1.grant(\"admin\", group0)\n self.assertEqual(number_group_admins(group0), 2)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def user_is_admin(user):\n return user in admins", "def test_is_admin_user(self):\n admin = User.objects.get(email='[email protected]')\n self.assertEqual(admin.is_staff, True)", "def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins", "def test_new_admin_subscriptions(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n with audits('add user test-user to Admin'):\n self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n p_nbhd = M.Neighborhood.query.get(name='Projects')\n p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)\n uid = M.User.by_username('test-user')._id\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert sub, 'New admin not subscribed to app %s' % ac\n\n \"\"\"\n When user is removed from admins group then user must be unsubscribed\n from all the tools in the project\n \"\"\"\n self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert not sub, 'New admin not unsubscribed to app %s' % ac", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def test_subroles(self):\n def check_roles(r):\n dev_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[2]\n mem_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[3]\n assert 'All users in Admin group' in dev_holder.text\n assert 'All users in Developer group' in mem_holder.text\n\n r = self.app.get('/admin/groups/')\n\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n # test that subroles are intact after user added\n with audits('add user test-user to Admin'):\n r = self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)\n # test that subroles are intact after user deleted\n with audits('remove user test-user from Admin'):\n r = self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 
'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)", "def admin_list(message):\n load_users(message._client.users)\n names = list_to_names(user_list.admin_list)\n message.reply('My admins are: {}'.format(\", \".join(names)))", "def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def get_admins(self):\n return self.admins_group.user_set.all()", "def test_add_admin_to_org(self):\n pass", "def get_admin_users() -> User:\n return User.objects.filter(group__name__contains=\"admin\")", "def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')", "def is_admin(self, user):\n return user.name in self.admins" ]
[ "0.7459246", "0.7412557", "0.74001384", "0.7367604", "0.7157066", "0.70924866", "0.70496404", "0.70242584", "0.70158803", "0.7010552", "0.69874895", "0.696997", "0.69406426", "0.6914392", "0.6912082", "0.6910139", "0.69032836", "0.68800515", "0.68568987", "0.6850447", "0.6785174", "0.67598534", "0.6746246", "0.67421323", "0.67271703", "0.6719937", "0.6703436", "0.6695416", "0.6657125", "0.66529626" ]
0.79272
0
Back up the git refs.
def backup_ref(self): # Back ourselves up! backup_ref="refs/backups/{0}-{1}-{2}".format(self.ref_type, self.ref_name, int( time.time() )) command = ("git", "update-ref", backup_ref, self.old_sha1) process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backup(self):\n\n\t\twith temp_dir(self.path):\n\t\t\t# only if changes made\n\t\t\tcheck = sp.check_output(['git', 'status', '--porcelain'])\n\t\t\t# check if untracked files\n\t\t\tuntracked = sp.check_output(['git', 'ls-files', '--others', '--exclude-standard'])\n\n\t\t\tif check:\n\t\t\t\tif untracked:\n\t\t\t\t\t# just add them all ... probably a better/safer/more direct way to do this\n\t\t\t\t\t_ = sp.check_output(['git', 'add', '.'])\n\t\t\t\t_ = sp.check_output([\n\t\t\t\t\t\t\"git\", \"commit\", \"-am\", f\"AUTO update on {dt.date.today().isoformat()}\"])\n\n\t\t\t# presumes that there is a remote!\n\t\t\toutput = sp.check_output([\n\t\t\t\t\t\"git\", \"push\"],\n\t\t\t\t\tstderr=sp.STDOUT\n\t\t\t\t\t)\n\n\t\t\treturn output.decode()\n\t\t\t# else:\n\t\t\t# \treturn 'No changes to commit'", "def _stash_and_checkout(repo, version):\n repo.git.stash()\n repo.git.checkout(version)\n repo.git.clean(\"-df\")", "def backup_database():\n db_path = os.path.join(config.cum_dir, 'cum.db')\n backup_path = os.path.join(config.cum_dir, 'cum.db.bak')\n copyfile(db_path, backup_path)", "def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')", "def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')", "def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def revert(self, ref):\n self._git.head.commit = ref\n self._git.head.reset(index=True, working_tree=True)", "def _save_state(self):\n with open(os.path.join(self._workdir, '.git', 'drover'), 'wb') as f:\n cPickle.dump(self, f)", "def __makeBackup(self):\n pass #FIXME!!!", "def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")", "def __gitHouseKeeping(self):\n self.vcs.gitHouseKeeping(self.project.getProjectPath())", "def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)", "def backup(ctx, project, origin, force):\n\n if not check_main_conf(ctx):\n return\n\n if 
origin is not None and project is None:\n click.echo(\"--project option is required when --origin is set.\")\n return\n\n bkp = ctx.obj[\"bkp\"]\n\n if not os.path.exists(ctx.obj[\"PROJECTS_DIR\"]):\n click.echo(\"Projects directory doesn't exists at %s\" % ctx.obj[\"PROJECTS_DIR\"])\n return\n\n if project is not None:\n bkp.project_load(project_name=project)\n bkp.backup(origin=origin, force=force)\n else:\n for file in os.listdir(ctx.obj[\"PROJECTS_DIR\"]):\n if file.endswith(\".conf\"):\n project_name = file.replace(\".conf\", \"\")\n bkp.project_load(project_name=project_name)\n bkp.backup(origin=origin, force=force)", "def git_upgraded_pkgs(self):\n\n self.extract_from_cachedir()\n self.etc_commits.added.commit()\n\n cherry_pick_sha = None\n if self.etc_commits.cherry_pick.rpaths:\n self.etc_commits.cherry_pick.commit()\n cherry_pick_sha = self.repo.git_cmd('rev-list -1 HEAD --')\n\n # Clean the working area of the files that are not under version\n # control.\n self.repo.git_cmd('clean -d -x -f')\n\n # Update the master-tmp branch with new files.\n if self.master_commits.added.rpaths:\n self.repo.checkout('master-tmp')\n for rpath in self.master_commits.added.rpaths:\n repo_file = os.path.join(self.repodir, rpath)\n if os.path.lexists(repo_file):\n warn('adding %s to the master-tmp branch but this file'\n ' already exists' % rpath)\n copy_file(rpath, self.root_dir, self.repodir,\n repo_file=repo_file)\n self.master_commits.added.commit()\n\n return cherry_pick_sha", "def backup(self):\n\n for filename in self.filenames[:]:\n if not filename.endswith(\".\"+self.PYTHON_EXTENSION):\n continue\n origfilename = filename + \".\" + self.BACKUP_EXTENSION\n if origfilename not in self.filenames:\n shutil.copy(filename, origfilename)\n self.filenames.append(origfilename)", "def automatic_backup(self):\n\n if self.observationId:\n logging.info(\"automatic backup\")\n self.save_project_activated()", "def save_backup(\n self):\n self.backup = self.data", "def hard_reset_branches(args):\n checkout_branches(args)\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Hard resetting tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n repo.check_command([\"reset\", \"--hard\", project.remote_refspec])", "def flush_repo():\n server = get_server()\n run(\"rm -rf %(project_name)s\" % env)\n git.clone()\n server.setup()", "def dump_refs(args):\n man = load_manifest()\n first = True\n for (name, project) in man.projects.iteritems():\n if not first: print\n first = False\n print \"Project %s:\" % name\n\n repo = GitRepo(workdir_for_project(project))\n print \" HEAD: %s\" % repo.rev_parse(\"HEAD\")\n print \" Symbolic: %s\" % repo.current_branch()\n project_status(project, indent=2)\n\n repo = get_manifest_repo()\n if repo:\n print\n print \"Manifest repo:\"\n print \" HEAD: %s\" % repo.rev_parse(\"HEAD\")\n print \" Symbolic: %s\" % repo.current_branch()\n repo_status(repo,\n repo.current_branch(),\n \"origin/\" + repo.current_branch(),\n indent=2)\n check_dirty_repo(repo, indent=2)", "def svn_fs_hotcopy(*args):\r\n return _fs.svn_fs_hotcopy(*args)", "def __gitStashClear(self):\n self.vcs.gitStashClear(self.project.getProjectPath())", "def backup_database(self):\n backup_file = \"{}-{}.sql\".format(\n config.DATABASE_NAME, datetime.today().strftime(\"%Y-%m-%d--%H%M\")\n )\n backup_uri = \"{}/{}\".format(config.DATABASE_BACKUP_BUCKET, backup_file)\n step = \"Backing Up Database:\\nbackup={}\".format(backup_uri)\n try:\n 
self.slacker.send_thread_reply(step)\n backup_command = [\n \"gcloud\",\n \"sql\",\n \"export\",\n \"sql\",\n config.DATABASE_INSTANCE_NAME,\n backup_uri,\n \"--database={}\".format(config.DATABASE_NAME),\n \"--verbosity=debug\",\n ]\n subprocess.run(backup_command, check=True)\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "def backup(self, backup):\n self._backup = backup", "def __restoreBackup(self):\n pass #FIXME!!!", "def __gitStashBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashBranch(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Create Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def __gitStashDrop(self):\n self.vcs.gitStashDrop(self.project.getProjectPath())", "def clean(self):\n self.run(['git', 'reset', '--hard', 'HEAD'])\n self.run(['git', 'clean', '-fdx'])\n self.run(['git', 'checkout', 'origin/master'])", "def backups(self, backups):\n\n self._backups = backups" ]
[ "0.6952309", "0.6250587", "0.6083144", "0.6023908", "0.6018048", "0.5927529", "0.5912871", "0.58550334", "0.5830668", "0.5823843", "0.5808333", "0.5635343", "0.5619511", "0.5558526", "0.5550184", "0.54987204", "0.54584205", "0.54354334", "0.54245806", "0.53884566", "0.5356423", "0.5344026", "0.5339792", "0.53226286", "0.53070134", "0.5303594", "0.53003895", "0.5298812", "0.5298441", "0.52726203" ]
0.7884595
0
Whether the audit failed (True) or passed (False).
def audit_failed(self): return self.__failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasFailed(self):\n record = self.getRunRecord().getRecord(\"run\")\n return record.state is FAIL", "def is_failed(self):\n\n return self._state == \"FAILED\"", "def is_failed(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-failed').succeeded", "def passed(self):\n return not self.failed()", "def is_failing(self):\n return self.current_state == self.States.FAILED", "def didFail(self):\n return self.state in (\"cancelled\", \"failed\")", "def __bool__(self) -> bool:\n return self.failed", "def is_successful(self):\n for item in self.summary:\n if item['task_status'] is False:\n return testcase.TestCase.EX_TESTCASE_FAILED\n\n return super().is_successful()", "def isfailure(self):\n\n return self.proc.returncode != 0", "def didFail(self):\n return self._state in self._FailedStates", "def failed(self) -> bool:\n return not self.ok", "def was_successful(self):\n return self.data.exception_type is None or \\\n self.data.exception_type in TestOutcome.POSITIVE_RESULTS", "def is_successful(self):\n skips = self.details.get(\"skipped_number\", 0)\n if skips > 0 and self.deny_skipping:\n return testcase.TestCase.EX_TESTCASE_FAILED\n if self.tests_count and (\n self.details.get(\"tests_number\", 0) != self.tests_count):\n return testcase.TestCase.EX_TESTCASE_FAILED\n return super().is_successful()", "def failed(self):\n if len(self.progress) > 0:\n return self.progress[-1].status == TestStatus.canceled\n return False", "def has_failed(self):\n return self._error is not None", "def had_error(self):\n return self.data.exception_type == TestOutcome.ERROR", "def failed(self):\n return len(self.failed_outputs) > 0 or len(self.errors) > 0", "def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False", "def is_failing(self):\n if self.data.exception_type is None:\n return False\n\n if self.mode in (MODE_CRITICAL, MODE_FINALLY) and \\\n self.data.exception_type not in TestOutcome.POSITIVE_RESULTS:\n return True\n\n if self.mode in (MODE_OPTIONAL,) and \\\n self.data.exception_type not in TestOutcome.UNCRITICAL_RESULTS:\n return True\n\n return False", "def indicate_failure(self):\n pass", "def passed(self):\n if self.result == RESULT_PASS:\n return True\n\n return False", "def is_failed_user_data_retrieval(self):\n return self._tag == 'failed_user_data_retrieval'", "def failed(self):\n output = self.__call__()\n return output.failed", "def failed_roboscript(self) -> bool:\n return pulumi.get(self, \"failed_roboscript\")", "def zero_failures(self) -> bool:\n return abs(self.failurerate) < 1e-7", "def _job_was_successful(self, status):\n success = True\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event\n for event in status[\"metadata\"][\"events\"]:\n\n logger.debug(event[\"description\"])\n\n # Does it always result in fail for other failure reasons?\n if \"failed\" in event:\n success = False\n action = event.get(\"failed\")\n logger.debug(\"{}: {}\".format(action[\"code\"], action[\"cause\"]))\n\n elif \"unexpectedExitStatus\" in event:\n action = event.get(\"unexpectedExitStatus\")\n\n if action[\"exitStatus\"] != 0:\n success = False\n\n # Provide reason for the failure (desc includes exit code)\n msg = \"%s\" % event[\"description\"]\n if \"stderr\" in action:\n msg += \": %s\" % action[\"stderr\"]\n logger.debug(msg)\n\n return success", "def failed(self):\n return self.joe.dead", "def has_failures_or_errors(self):\r\n return (self._num_failures() > 0) or 
(self._num_script_errors() > 0)", "def server_failure(self, resp):\n return resp[0] in FAILURE_CODES", "def result(self):\n result = True\n if self.state != \"error\":\n if self.tests_run < len(self.tests):\n result = False\n else:\n failed = [test for test in self.tests if test.test_result == False]\n if failed:\n result = False\n else:\n result = False\n\n return result" ]
[ "0.70987207", "0.7077051", "0.6967613", "0.6923564", "0.68601596", "0.6790287", "0.67796767", "0.6752796", "0.67493594", "0.67102164", "0.6676905", "0.667684", "0.6542292", "0.64718133", "0.6417293", "0.64025366", "0.63911164", "0.634746", "0.6297389", "0.62367797", "0.62313586", "0.6224354", "0.6202326", "0.617334", "0.61608654", "0.6149201", "0.6146332", "0.61417973", "0.6123227", "0.6112711" ]
0.76317364
0
Audit the commit for proper end-of-line characters. The UNIX-type EOL is the only allowed EOL character.
def audit_eol(self): # Regex's.... re_commit = re.compile("^\xff(.+)\xff$") re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$") blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$") # Bool to allow special files such as vcards to bypass the check eol_allowed = False # Do EOL audit! process = get_change_diff( self.repository, ["-p"] ) for line in process.stdout: commit_change = re.match( re_commit, line ) if commit_change: commit = commit_change.group(1) continue file_change = re.match( re_filename, line ) if file_change: filename = file_change.group(2) eol_violation = False eol_allowed = False # Check if it's an allowed mimetype # First - check with the mimetypes system, to see if it can tell guessed_type, _ = mimetypes.guess_type(filename) if guessed_type in self.ALLOWED_EOL_MIMETYPES: eol_allowed = True continue # Second check: by file extension # NOTE: This uses the FIRST dot as extension splitted_filename = filename.split(os.extsep) # Check if there's an extension or not # NOTE This assumes that files use dots for extensions only! if len(splitted_filename) > 1: extension = splitted_filename[1] if extension in self.ALLOWED_EOL_EXTENSIONS: eol_allowed = True continue # Unless they added it, ignore it if not line.startswith("+"): continue if re.search( blocked_eol, line ) and not eol_violation: # Is this an allowed filename? if eol_allowed: continue # Failure has been found... handle it eol_violation = True self.__log_failure(commit, "End of Line Style (non-Unix): " + filename);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()", "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n col_commit = self.commit\n else:\n col = self.columns[i]\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n seen_this = True\n self.buf += '*'\n chars_written += 1\n\n if self.num_parents > 2:\n chars_written += self._draw_octopus_merge()\n elif seen_this and self.num_parents > 2:\n self._write_column(col, '\\\\')\n chars_written += 1\n elif seen_this and self.num_parents == 2:\n # This is a 2-way merge commit. There is no\n # GraphState.PRE_COMMIT stage for 2-way merges, so this is the\n # first line of output for this commit. Check to see what the\n # previous line of output was.\n #\n # If it was GraphState.POST_MERGE, the branch line coming into\n # this commit may have been '\\', and not '|' or '/'. If so,\n # output the branch line as '\\' on this line, instead of '|'.\n # This makes the output look nicer.\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n if self.num_parents > 1:\n self._update_state(GraphState.POST_MERGE)\n elif self._is_mapping_correct():\n self._update_state(GraphState.PADDING)\n else:\n self._update_state(GraphState.COLLAPSING)", "def test_no_final_eol(self, env: yaenv.Env):\n from tempfile import mkstemp\n env.envfile = mkstemp()[-1]\n with open(env, 'w') as f:\n f.write('EOL=no')\n env['BLANK'] = ''\n with open(env, 'r') as f:\n assert len(f.readlines()) == 2", "def eat_EOL(self):\n # print(\"Start eating EOL\")\n self.eat(EOL)\n while self.current_token.type == EOL:\n self.eat(EOL)\n # print(\"Stop eating EOL\")", "def escape_eol_chars(options):\n pass", "def log(self, chars):\n self.insert(END, chars+'\\n')\n self.see(END)\n self.update()", "def _(event):\n if line.is_multiline:\n line.newline()\n else:\n if line.validate():\n cli_ref().line.add_to_history()\n cli_ref().set_return_value(line.document)", "def _endline(line):\n return line.rstrip() + '\\n'", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? 
changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def test_end_of_line_single_char_last_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"7.1\", \"7.1\"),\n command_name=\"end-of-line\",\n )", "def test_dos_eol():\n import figleaf, figleaf.annotate_html\n \n figleaf.start()\n execfile(os.path.join(thisdir, 'tst_dos_eol.py'))\n figleaf.stop()\n\n coverage = figleaf.get_data().gather_files()\n\n tmpdir = tempfile.mkdtemp('.figleaf')\n\n try:\n figleaf.annotate_html.report_as_html(coverage, tmpdir, [], {})\n finally:\n files = glob.glob('%s/*' % (tmpdir,))\n for f in files:\n os.unlink(f)\n os.rmdir(tmpdir)", "def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()", "def convert_line_endings():\n files = []\n for ext in [\n \".py\",\n \".sh\",\n \"Dockerfile\",\n \".txt\",\n \".csv\",\n \".mhd\",\n \".gitignore\",\n ]:\n files.extend(Path(\".\").glob(f\"**/*{ext}\"))\n\n for file in files:\n with open(str(file), \"rb\") as f:\n lines = f.read()\n\n lines = lines.replace(EOL_WIN, EOL_UNIX).replace(EOL_MAC, EOL_UNIX)\n\n with open(str(file), \"wb\") as f:\n f.write(lines)", "def do_EOF(self, line):\n print()\n 
models.storage.save()\n return True", "def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())", "def do_EOF(self, line):\n print(\"\")\n return True", "def fix_line_endings(fname, eol=b'\\n'):\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)", "def logwrite(self, line):\n sql = b\"update log set log_text=concat(log_text,'\" + self.__timestamp() + line + \"') where log_id=\" + self.logid +\";\\n\"\n self.logme.stdin.write(sql)\n self.logme.stdin.flush()\n return True", "def _check_last_character(line_index, input_line, code_character):\n global _total_lines_of_code\n if input_line.endswith(code_character):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def GetEOLChar(self):\n m_id = self.GetEOLMode()\n if m_id == wx.stc.STC_EOL_CR:\n return u'\\r'\n elif m_id == wx.stc.STC_EOL_CRLF:\n return u'\\r\\n'\n else:\n return u'\\n'", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def eol(self):\n return self.pos == len(self.tokens)", "def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)", "def do_EOF(self, line):\n print()\n return True", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < 
len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if 
commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)" ]
[ "0.6151211", "0.6065239", "0.57468516", "0.5741316", "0.5723494", "0.5639385", "0.5574638", "0.5561204", "0.5554823", "0.55486727", "0.553186", "0.5530341", "0.55275774", "0.5481987", "0.54660696", "0.540383", "0.5398025", "0.5388231", "0.53565466", "0.53498983", "0.5348075", "0.53350383", "0.53350383", "0.53350383", "0.5301547", "0.5287191", "0.52785945", "0.5272429", "0.5216899", "0.5199439" ]
0.83223575
0
Audit the file names in the commit.
def audit_filename(self): for commit in self.repository.commits.values(): for filename in commit.files_changed: if commit.files_changed[ filename ]["change"] not in ["A","R","C"]: continue for restriction in self.filename_limits: if re.search(restriction, filename): self.__log_failure(commit.sha1, "Invalid filename: " + filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def commit_names(self, commit):\n return []", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def commit (files):\n\n version = get_tag(comp_versions, 'ACE')\n root_path = get_path()\n files = [i[len(root_path):] if i.startswith(root_path) else i for i in files]\n\n print (\"Committing the following files for \" + version + ':', \" \".join (files))\n\n if opts.take_action:\n for file in files:\n print (\"Adding file \" + file + \" to commit\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git add \" + file)\n\n ex (\"cd $DOC_ROOT/ACE_TAO && git commit -m\\\"\" + version + \"\\\"\")", "def saveStatResults(self, changes, file_stats):\n\n # commit_obj = rpc.RpcProxy('software_dev.commit')\n fchange_obj = rpc.RpcProxy('software_dev.filechange')\n \n commit_ids = []\n for chg in changes:\n if not chg.number:\n continue\n commit_ids.append(chg.number)\n \n while len(commit_ids) and len(file_stats):\n cid = commit_ids.pop() # so, we attribute the stats to the\n # last commit that matches their files\n fc_ids = fchange_obj.search([('commit_id','=', cid)])\n fcres = fchange_obj.read(fc_ids, ['filename'])\n # We read all the filenames that belong to the commit and\n # then try to see if we have any stats for them.\n if not fcres:\n continue\n for fcd in fcres:\n fcstat = file_stats.pop(fcd['filename'], False)\n if not fcstat:\n continue\n # now, we have a filechange.id and stats\n fchange_obj.write(fcd['id'], fcstat)", "def audit_eol(self):\n\n # Regex's....\n re_commit = re.compile(\"^\\xff(.+)\\xff$\")\n re_filename = re.compile(\"^diff --(cc |git a\\/.+ b\\/)(.+)$\")\n blocked_eol = re.compile(r\"(?:\\r\\n|\\n\\r|\\r)$\")\n\n # Bool to allow special files such as vcards to bypass the check\n eol_allowed = False\n\n\n # Do EOL audit!\n process = get_change_diff( self.repository, [\"-p\"] )\n for line in process.stdout:\n commit_change = re.match( re_commit, line )\n if commit_change:\n commit = commit_change.group(1)\n continue\n\n file_change = re.match( re_filename, line )\n if file_change:\n 
filename = file_change.group(2)\n eol_violation = False\n eol_allowed = False\n\n # Check if it's an allowed mimetype\n # First - check with the mimetypes system, to see if it can tell\n guessed_type, _ = mimetypes.guess_type(filename)\n if guessed_type in self.ALLOWED_EOL_MIMETYPES:\n eol_allowed = True\n continue\n\n # Second check: by file extension\n # NOTE: This uses the FIRST dot as extension\n splitted_filename = filename.split(os.extsep)\n # Check if there's an extension or not\n # NOTE This assumes that files use dots for extensions only!\n if len(splitted_filename) > 1:\n extension = splitted_filename[1]\n if extension in self.ALLOWED_EOL_EXTENSIONS:\n eol_allowed = True\n\n continue\n\n # Unless they added it, ignore it\n if not line.startswith(\"+\"):\n continue\n\n if re.search( blocked_eol, line ) and not eol_violation:\n # Is this an allowed filename?\n if eol_allowed:\n continue\n\n # Failure has been found... handle it\n eol_violation = True\n self.__log_failure(commit, \"End of Line Style (non-Unix): \" + filename);", "def touched_files(self, parent):", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}", "def amend_commit_with_file(tmp_file_name):\n command = f\"git commit --amend --allow-empty -F {tmp_file_name}\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def _warn_about_git_filters(files):\n repository = project_context.repository\n\n src_attrs = []\n dst_attrs = []\n\n for path, attrs in repository.get_attributes(*files).items():\n src = Path(path)\n dst = files[src].relative_to(project_context.path)\n src = src.relative_to(project_context.path)\n attrs_text = \"\"\n for name, value in attrs.items():\n if value == \"unset\":\n attrs_text += f\" -{name}\"\n elif value == \"set\":\n attrs_text += f\" {name}\"\n else:\n attrs_text += f\" {name}={value}\"\n\n src_attrs.append(f\"{str(src)}{attrs_text}\")\n dst_attrs.append(f\"{str(dst)}{attrs_text}\")\n\n if src_attrs:\n src_attrs_str = \"\\n\\t\".join(src_attrs)\n dst_attrs_str = \"\\n\\t\".join(dst_attrs)\n communication.warn(\n f\"There are custom git attributes for the following files:\\n\\t{src_attrs_str}\\n\"\n f\"You need to edit '.gitattributes' and add the following:\\n\\t{dst_attrs_str}\"\n )", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n 
files_list.append(output[a].replace('\\n', ''))\n return files_list", "def get_filenames_in_commit(git_reference: str = \"\"):\n c = cmd.run(f\"git show --name-only --pretty=format: {git_reference}\")\n if c.return_code == 0:\n return c.out.strip().split(\"\\n\")\n else:\n raise GitCommandError(c.err)", "def _log_changed_names(changed_names: Iterable[Tuple[str, str]]) -> None:\n if not changed_names:\n return\n from .utils import logger\n\n logger.warning(\"New names:\")\n for orig_name, new_name in changed_names:\n logger.warning(\"* %r -> %r\", orig_name, new_name)", "def audit_emails_in_metadata(self):\n\n # Iterate over commits....\n disallowed_domains = [\"localhost\", \"localhost.localdomain\", \"(none)\", \"bombardier.com\", \"rail.bombardier.com\"]\n for commit in self.repository.commits.values():\n for email_address in [ commit.committer_email, commit.author_email ]:\n # Extract the email address, and reject them if extraction fails....\n extraction = re.match(\"^(\\S+)@(\\S+)$\", email_address)\n if not extraction:\n self.__log_failure(commit.sha1, \"Seemingly invalid email address: \" + email_address)\n continue\n\n # Don't allow domains which are disallowed...\n domain = extraction.group(2)\n if domain in disallowed_domains:\n self.__log_failure(commit.sha1, \"Email address using a blocked domain: \" + email_address)\n continue\n\n # Ensure they have a valid MX/A entry in DNS....\n try:\n dns.resolver.query(domain, \"MX\")\n except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):\n try:\n dns.resolver.query(domain, \"A\")\n except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN):\n self.__log_failure(commit.sha1, \"Email address has an invalid domain : \" + email_address)\n except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):\n self.__log_failure(commit.sha1, \"Email address has an invalid domain : \" + email_address)", "def FormatSubversionPropertyChanges(filename, props):\r\n prop_changes_lines = [\r\n \"Property changes on: %s\" % filename,\r\n \"___________________________________________________________________\"]\r\n for key, value in props:\r\n prop_changes_lines.append(\"Added: \" + key)\r\n prop_changes_lines.append(\" + \" + value)\r\n return \"\\n\".join(prop_changes_lines) + \"\\n\"", "def get_changed_files_from(old_commit_sha, new_commit_sha):\n return check_output(\n \"git diff-tree --no-commit-id --name-only -r {0}..{1}\".format(\n old_commit_sha,\n new_commit_sha\n ).split(\" \")\n ).decode('utf-8').strip()", "def changed(self, filename='.md5', glob=None):\n if glob is not None:\n filename += '.glob-' + ''.join(ch.lower()\n for ch in glob if ch.isalpha())\n return changed(self, filename, glob=glob)", "def audit(self, message):\n channel = self.config.get('AUDIT_CHANNEL', False)\n log_file = self.config.get('AUDIT_FILE', False)\n if channel: outputs.append([channel, message])\n if log_file:\n with open(log_file, 'a') as f: f.write(message)\n logging.warning('AUDIT: ' + message)", "def _audit_cli_args(self):\n\n args = [\n \"--operation=audit\",\n \"--operation=status\",\n \"--logtostderr\",\n ]\n\n return args", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch 
notes!\")", "def onApply(self, event):\n\n # Rename all of the files based on the substitution.\n for (old, new) in zip(self.m_diskNames, self.m_newNames):\n if old != new:\n old = os.path.join(self.m_curPath, old)\n new = os.path.join(self.m_curPath, new)\n try:\n os.rename(old, new)\n except OSError:\n pass\n\n # Now we out the lists so that what the user sees after this\n # reflects what's on disk.\n self.m_diskNames[:] = []\n self.m_newNames[:] = []\n\n # Update.\n self.updateDiskFileList()", "def _add_commit_sha1_to_lists(self):\n sha1_num_commits = \"-\" + self.commit_number\n sha1_args = [sha1_num_commits, \"--pretty=%h\"]\n # git log -[N] --pretty=%h ===> newline delimited list of SHA1 x N commit\n sha1_string = self.git.log(sha1_args)\n # do not modify to os.linesep, Win fails tests with this change\n self.commit_sha1_list = sha1_string.split(\"\\n\")", "def get_changed_files(self, old_commit, new_commit):\n if old_commit is not None and not self.pygit.descendant_of(\n new_commit, old_commit\n ):\n raise ValueError(\"Second commit must be a descendant of first commit\")\n\n old_index = pygit2.Index()\n new_index = pygit2.Index()\n if old_commit is not None:\n old_tree = self.pygit.get(old_commit).tree\n old_index.read_tree(old_tree)\n else:\n # This is a special hash that represents an empty tree\n old_tree = self.pygit.get(\"4b825dc642cb6eb9a060e54bf8d69288fbee4904\")\n\n new_tree = self.pygit.get(new_commit).tree\n new_index.read_tree(new_tree)\n\n for patch in self.pygit.diff(old_tree, new_tree):\n if patch.delta.status_char() != \"M\":\n continue\n\n if not patch.delta.new_file.path.startswith(\"locales/\"):\n continue\n\n old_file_oid = old_index[patch.delta.old_file.path].oid\n new_file_oid = new_index[patch.delta.new_file.path].oid\n old_file = self.pygit.get(old_file_oid)\n new_file = self.pygit.get(new_file_oid)\n yield patch.delta.new_file.path, old_file.data, new_file.data", "def stage_changes(c):\n c.run(f\"git add -u\")", "def get_filenames(commit: git.Commit) -> List[str]:\n\n if not commit.parents:\n return []\n diffs = commit.tree.diff(commit.parents[0])\n # Sometimes a path is in A and not B but we want all filenames.\n return sorted(\n {diff.a_path for diff in diffs if diff.a_path is not None}\n | {diff.b_path for diff in diffs if diff.b_path is not None}\n )", "def log_revision(self, revision):\n to_file = self.to_file\n\n date_str = format_date(revision.rev.timestamp,\n revision.rev.timezone or 0,\n self.show_timezone,\n date_fmt='%Y-%m-%d',\n show_offset=False)\n\n authors = revision.rev.get_apparent_authors()\n to_file.write('%s %s\\n\\n' % (date_str, \", \".join(authors)))\n\n if revision.delta is not None and revision.delta.has_changed():\n for c in revision.delta.added + revision.delta.removed + \\\n revision.delta.modified:\n path, = c[:1]\n to_file.write('\\t* %s:\\n' % (path,))\n for c in revision.delta.renamed:\n oldpath, newpath = c[:2]\n # For renamed files, show both the old and the new path\n to_file.write('\\t* %s:\\n\\t* %s:\\n' % (oldpath, newpath))\n to_file.write('\\n')\n\n if not revision.rev.message:\n to_file.write('\\tNo commit message\\n')\n else:\n message = revision.rev.message.rstrip('\\r\\n')\n for l in message.split('\\n'):\n to_file.write('\\t%s\\n' % (l.lstrip(),))\n to_file.write('\\n')", "def changed_files(self, base=None, remote=None, single_commit=None):\n if single_commit:\n cmd = ['git', 'diff', '{}^!'.format(single_commit), '--name-only']\n elif base and remote:\n if base == 'WORKING':\n cmd = ['git', 'diff', remote, 
'--name-only']\n elif base == 'INDEX':\n cmd = ['git', 'diff', '--staged', remote, '--name-only']\n else:\n cmd = ['git', 'diff', base, remote, '--name-only']\n else:\n raise HTTPError(400, 'Either single_commit or (base and remote) must be provided')\n\n \n response = {}\n try:\n stdout = subprocess.check_output(\n cmd, \n cwd=self.root_dir,\n stderr=subprocess.STDOUT\n )\n response['files'] = stdout.decode('utf-8').strip().split('\\n')\n response['code'] = 0\n except CalledProcessError as e:\n response['message'] = e.output.decode('utf-8')\n response['code'] = e.returncode\n\n return response" ]
[ "0.67635244", "0.63775945", "0.6034091", "0.5988995", "0.58283305", "0.5755764", "0.5654512", "0.56192213", "0.5432775", "0.5430367", "0.52749014", "0.5257821", "0.52496487", "0.5248489", "0.5241954", "0.5239385", "0.5227689", "0.5223898", "0.5222077", "0.52184683", "0.52133906", "0.52131665", "0.5185653", "0.51828104", "0.5182539", "0.515562", "0.5154641", "0.51522595", "0.5150393", "0.5134131" ]
0.7895987
0
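For reference, the last negative in the record above shells out to `git diff ... --name-only` with `subprocess` and packs the result into a response dict. A minimal standalone sketch of that pattern — the function signature and error handling here are simplified for illustration, not copied from any single record — would look like:

```python
import subprocess
from subprocess import CalledProcessError


def changed_files(repo_dir, base, remote):
    """List files that differ between two revisions via `git diff --name-only`."""
    cmd = ["git", "diff", base, remote, "--name-only"]
    response = {}
    try:
        stdout = subprocess.check_output(cmd, cwd=repo_dir, stderr=subprocess.STDOUT)
        response["files"] = stdout.decode("utf-8").strip().split("\n")
        response["code"] = 0
    except CalledProcessError as e:
        # git returns non-zero e.g. for unknown revisions; surface its message
        response["message"] = e.output.decode("utf-8")
        response["code"] = e.returncode
    return response
```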
Audit names in commit metadata. Names which do not have a first name and a surname are extremely uncommon and when present are therefore generally invalid. As we want people to use their actual name when committing we do some checks to make sure that what looks like an actual name is present.
def audit_names_in_metadata(self): # Iterate over commits.... for commit in self.repository.commits.values(): for name in [ commit.committer_name, commit.author_name ]: # Is the name whitelisted? if name in self.FullNameWhitelist: continue # As a special case, allow the name 'GitHub' for certain repositories if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist: self.__log_warning(commit.sha1, "Commit has username 'GitHub' (web merge of PR); allowing anyway") continue # Check to see if the name contains spaces - if not - it is probably misconfigured.... if " " not in name.strip(): self.__log_failure(commit.sha1, "Non-full name: " + name) continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)", "def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix", "def _maybe_set_name(self) -> None:\n if not self.name:\n if isinstance(self.github, dict):\n if self.github.get(\"commit\"):\n self.name = f\"{self.reason}: {self.github['commit']}\"", "def test_first_last_name(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def convert_name(self, human_name):\n\n human_name = HumanName(human_name)\n if human_name.suffix:\n self.metadata[\"gutenberg_name_suffix\"] = human_name.suffix\n human_name.suffix = \"\"\n if human_name.nickname:\n # LOGGER.debug(\"%s nickname: %s\", str(human_name), human_name.nickname)\n no_nickname = copy.copy(human_name)\n no_nickname.nickname = \"\"\n first_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.first, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.first,\n # re.UNICODE\n # ),\n # human_name.nickname\n # )\n if first_name_match and len(first_name_match.group(0)) >= len(human_name.first):\n human_name.first = first_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.first):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"] = set([str(no_nickname)])\n middle_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.middle, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.middle, re.UNICODE\n # ),\n # human_name.nickname\n # )\n if middle_name_match and len(middle_name_match.group(0)) >= len(human_name.middle):\n human_name.middle = middle_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.middle):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"].add(str(no_nickname))\n return human_name", "def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n # Asserting that formatted_name equals 'Janis Joplin'\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = 
get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n\t\tformatted_name = get_formatted_name('janos', 'jk')\n\t\tself.assertEqual(formatted_name, 'Janos Jk')", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def series_statement_added_entry_personal_name(self, key, value):\n indicator_map1 = {\"0\": \"Forename\", \"1\": \"Surname\", \"3\": \"Family name\"}\n indicator_map2 = {\n \"0\": \"Main entry not represented by pronoun\",\n \"1\": \"Main entry represented by pronoun\"}\n field_map = {\n 'p': 'name_of_part_section_of_a_work',\n '6': 'linkage',\n 'u': 'affiliation',\n 'b': 'numeration',\n '4': 'relator_code',\n 'x': 'international_standard_serial_number',\n 'n': 'number_of_part_section_of_a_work',\n 'a': 'personal_name',\n '8': 'field_link_and_sequence_number',\n 'k': 'form_subheading',\n 't': 'title_of_a_work',\n 'e': 'relator_term',\n 'l': 'language_of_a_work',\n 'c': 'titles_and_other_words_associated_with_a_name',\n 'g': 'miscellaneous_information',\n 'f': 'date_of_a_work',\n 'd': 'dates_associated_with_a_name',\n 'v': 'volume_sequential_designation',\n }\n\n order = utils.map_order(field_map, value)\n\n if key[3] in indicator_map1:\n order.append('type_of_personal_name_entry_element')\n\n if key[4] in indicator_map2:\n order.append('pronoun_represents_main_entry')\n\n return {\n '__order__': tuple(order) if len(order) else None,\n 'name_of_part_section_of_a_work': utils.force_list(\n value.get('p')\n ),\n 'linkage': value.get('6'),\n 'affiliation': value.get('u'),\n 'numeration': value.get('b'),\n 'relator_code': utils.force_list(\n value.get('4')\n ),\n 'international_standard_serial_number': value.get('x'),\n 'number_of_part_section_of_a_work': utils.force_list(\n value.get('n')\n ),\n 'personal_name': value.get('a'),\n 'field_link_and_sequence_number': utils.force_list(\n value.get('8')\n ),\n 'form_subheading': utils.force_list(\n value.get('k')\n ),\n 'title_of_a_work': value.get('t'),\n 'relator_term': utils.force_list(\n value.get('e')\n ),\n 'language_of_a_work': value.get('l'),\n 'titles_and_other_words_associated_with_a_name': utils.force_list(\n value.get('c')\n ),\n 'miscellaneous_information': value.get('g'),\n 'date_of_a_work': value.get('f'),\n 'dates_associated_with_a_name': value.get('d'),\n 'volume_sequential_designation': value.get('v'),\n 'type_of_personal_name_entry_element': indicator_map1.get(key[3]),\n 'pronoun_represents_main_entry': indicator_map2.get(key[4]),\n }", "def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username", "def combine_name(self):\n if self.first_name.isalpha() and self.last_name.isalpha():\n username = self.first_name + \" \" + self.last_name\n return username\n return 'Names must be alphabets'", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')", "def test_first_last_middle_name(self):\n 
formatted_name = get_formatted_name('marie', 'curie', 'francis')\n self.assertEqual(formatted_name, 'Marie Francis Curie')", "def test_last_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__last_name=unromanized,\n profile__romanized_last_name=romanized,\n )\n assert CDDWriter.last_name(profile) == expected", "def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected", "def test_contributor_name_no_last_name(self):\n user = User.objects.create(username='admin', first_name='Jordan')\n \n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'Jordan')", "def test_contributor_name_no_last_name(self):\n user = User.objects.create(username='admin', first_name='Jordan')\n \n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'Jordan')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('david', 'Malan')\n self.assertEqual(formatted_name, 'David Malan')", "def change_name(change_account):\n change_data(change_account, changed_data='name')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('john', 'smith', 'billy')\n self.assertEqual(formatted_name, 'John Billy Smith')", "def fullname(self, name):\n f, l = name.split(' ')\n self.first = f\n self.last = l", "def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')", "def test_reformatted_full_name():\n assert reformatted_full_name(\"\") == \"\"\n assert reformatted_full_name(\"George\") == \"george\"\n assert reformatted_full_name(\"X Y Z A B\") == \"x b\"", "def test_super_short_author_name(self):\n spi_search = \"fin a er and cn cms\"\n inv_search = \"author:er collaboration:cms\"\n self._compare_searches(inv_search, spi_search)" ]
[ "0.62963563", "0.6226209", "0.6223605", "0.6148646", "0.6140524", "0.61402285", "0.6099476", "0.60691816", "0.6056856", "0.6056856", "0.6024149", "0.6016685", "0.5969354", "0.5957472", "0.5951923", "0.5932206", "0.5932206", "0.5906392", "0.59056324", "0.589487", "0.58702075", "0.5865046", "0.5865046", "0.5857882", "0.5834754", "0.5789346", "0.57806265", "0.5774827", "0.57658744", "0.575959" ]
0.8450045
0
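The positive document in this record reduces to a simple heuristic: whitelisted names pass, and everything else must contain a space to count as a full name. A standalone restatement of that check (the whitelist contents below are placeholders, not the repository's actual `FullNameWhitelist`):

```python
FULL_NAME_WHITELIST = {"GitHub"}  # placeholder entries for illustration


def looks_like_full_name(name):
    """Heuristic from the record above: a real name normally contains a space."""
    if name in FULL_NAME_WHITELIST:
        return True
    return " " in name.strip()


assert looks_like_full_name("Jane Doe")
assert not looks_like_full_name("jdoe")
```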
Audit commit metadata. Invalid hostnames such as localhost or (none) will be caught by this auditor. This will ensure that invalid email addresses or users will not show up in commits.
def audit_emails_in_metadata(self): # Iterate over commits.... disallowed_domains = ["localhost", "localhost.localdomain", "(none)", "bombardier.com", "rail.bombardier.com"] for commit in self.repository.commits.values(): for email_address in [ commit.committer_email, commit.author_email ]: # Extract the email address, and reject them if extraction fails.... extraction = re.match("^(\S+)@(\S+)$", email_address) if not extraction: self.__log_failure(commit.sha1, "Seemingly invalid email address: " + email_address) continue # Don't allow domains which are disallowed... domain = extraction.group(2) if domain in disallowed_domains: self.__log_failure(commit.sha1, "Email address using a blocked domain: " + email_address) continue # Ensure they have a valid MX/A entry in DNS.... try: dns.resolver.query(domain, "MX") except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel): try: dns.resolver.query(domain, "A") except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN): self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address) except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def lint_commit_author(commit):\n success = True\n if commit.author.email.endswith('users.noreply.github.com'):\n error(\n 'Commit author has no valid email address set: %s. '\n 'Use \"git config user.email [email protected]\" to '\n 'set a valid email address, then update the commit '\n 'with \"git rebase -i\" and/or '\n '\"git commit --amend --reset-author\". '\n 'Also check your GitHub settings at '\n 'https://github.com/settings/emails: your email address '\n 'must be verified, and the option \"Keep my email address '\n 'private\" must be disabled.' % (commit.author.email, ), commit)\n success = False\n\n if ' ' not in commit.author.name:\n warning(\n 'The commit author name \"%s\" contains no space. '\n 'Use \"git config user.name \\'Johnny English\\'\" to '\n 'set your real name, and update the commit with \"git rebase -i \" '\n 'and/or \"git commit --amend --reset-author\".' %\n (commit.author.name, ), commit)\n # A warning doesn't fail lint.\n\n return success", "def prepare_commit(self, commit):\n header = yaml.dump(commit.meta, default_flow_style=False)\n header += \"---\\n\"\n if commit.value is None:\n return bytes(header)\n else:\n return bytes(header) + bytes(commit.value)", "def get_commit_change_stats(self, commit_url='', full_name='', commit_sha=''):\n if commit_url == '' and (commit_sha == '' and full_name == ''):\n raise BaseException('commit url could not be generated. 
Commit url, commit sha and full name not set')\n return None\n url = commit_url\n if url == '':\n url = COMMIT_DETAILS.format(commit_sha=commit_sha, full_name=full_name)\n url = self.get_full_url(url)\n\n json_data = loads(self.get_from_net(url))\n stats = {'additions': 0, 'deletions': 0}\n if 'stats' in json_data:\n stats['additions'] = json_data['stats']['additions']\n stats['deletions'] = json_data['stats']['deletions']\n\n return stats", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def commit_names(self, commit):\n return []", "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)", "def _get_commit_info(commit: git.Commit, pretty_format: str) -> str:\n try:\n return commit.repo.git.show(commit.hexsha, pretty=f\"format:{pretty_format}\")\n except git.GitCommandError as error:\n raise PackitException(\n f\"Cannot find commit {commit.hexsha!r} to check its signature.\", error\n )", "def _trusted_commit(\n self, committer_id, commit_type, commit_message, commit_cmds):\n base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access\n self, committer_id, commit_type, commit_message, commit_cmds)\n\n # Create and delete events will already be recorded in the\n # ExplorationModel.\n if commit_type not in ['create', 'delete']:\n exp_models.ExplorationCommitLogEntryModel(\n id=('rights-%s-%s' % (self.id, self.version)),\n user_id=committer_id,\n exploration_id=self.id,\n commit_type=commit_type,\n commit_message=commit_message,\n commit_cmds=commit_cmds,\n version=None,\n post_commit_status=self.status,\n post_commit_community_owned=self.community_owned,\n post_commit_is_private=(\n self.status == constants.ACTIVITY_STATUS_PRIVATE)\n ).put()", "def test_commit_author(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n author = pygit2.Signature(\"Katherine\", \"[email protected]\")\n repository.commit(message=\"empty\", author=author)\n\n head = repository.head.commit\n assert author.name == head.author.name and author.email == head.author.email", "def format_commit_header(commit):\n\n result = {\n \"repository\": commit[0][0],\n \"published\": commit[0][1],\n \"author\": commit[0][2],\n \"description\": commit[0][5],\n \"commit\": commit[0][6],\n \"timestamp\": commit[0][7]\n }\n return result", "def make_log_entries(commits, git_repo):\n entries = []\n # Add header\n author = git_repo.get_author_info()\n entries.append(\"* %s %s <%s> %s\" % \\\n (datetime.datetime.now().strftime(\"%a %b %d %Y\"),\n author.name, author.email, get_version(git_repo,\n commits[0])))\n for commit in commits:\n commit_info = git_repo.get_commit_info(commit)\n entries.append(\"- %s\" % commit_info[\"subject\"])\n return entries", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n commit_title_start_words = filter(\n lambda x: x, 
COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def FakeCommitAsDict(commit_self):\n git_hash = commit_self.git_hash\n n = git_hash[len('git_hash_'):]\n return {\n 'repository': 'chromium',\n 'git_hash': git_hash,\n 'url': 'https://example.com/repository/+/' + git_hash,\n 'author': 'author%[email protected]' % (n,),\n 'subject': 'Subject.',\n 'message': 'Subject.\\n\\nCommit message.',\n }", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for 
commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, 
title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = 
bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation ([email protected])\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics ([email protected])\n |/\n f7a5a23d * missed version number in docs ([email protected])\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n self.assertEqual(event['event']['title'], \"Testing git commits\")", "def commit(self, msg):\n self.runtime.logger.info('Commit config: {}'.format(msg))\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"add\", \".\"])\n exectools.cmd_assert([\"git\", \"commit\", \"--allow-empty\", \"-m\", msg])", "def _trusted_commit(\n self, committer_id, commit_type, commit_message, commit_cmds):\n base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access\n self, committer_id, commit_type, commit_message, commit_cmds)\n\n # Create and delete events will already be recorded in the\n # CollectionModel.\n if commit_type not in ['create', 'delete']:\n collection_models.CollectionCommitLogEntryModel(\n id=('rights-%s-%s' % (self.id, self.version)),\n user_id=committer_id,\n collection_id=self.id,\n commit_type=commit_type,\n commit_message=commit_message,\n commit_cmds=commit_cmds,\n version=None,\n post_commit_status=self.status,\n post_commit_community_owned=self.community_owned,\n post_commit_is_private=(\n self.status == constants.ACTIVITY_STATUS_PRIVATE)\n ).put()", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def _helperAuditMetadata(syn,temp,metaDf,refCol,cols2Check,fileExts,\n entityMissMetadata,incorrectAnnotated,missingAnno):\n \n print \"Checking annotations against metadata...\"\n tempDict = temp.annotations\n tempId = temp.id\n exts = ')|('.join(fileExts)\n exts = r'(' + exts + ')'\n tempName = re.sub(exts,\"\",temp.name)\n \n if bool(tempDict):\n row = metaDf.loc[metaDf[refCol] == tempName]\n if row.empty:\n entityMissMetadata.append(tempId)\n print \"missing metadata\"\n else:\n for colName in cols2Check:\n print \">%s checking...\" % colName\n if colName in tempDict.keys():\n if map(str,row[colName])[0] != temp[colName][0]:\n if colName in incorrectAnnotated.keys():\n incorrectAnnotated[colName].append(tempId)\n else:\n incorrectAnnotated[colName] = [tempId]\n print \">>incorrect\"\n else:\n print 
\">>Passed!\"\n else:\n if colName in missingAnno.keys():\n missingAnno[colName].append(tempId)\n else:\n missingAnno[colName] = [tempId]\n print \">>missing\"\n print \"\"", "def _maybe_set_name(self) -> None:\n if not self.name:\n if isinstance(self.github, dict):\n if self.github.get(\"commit\"):\n self.name = f\"{self.reason}: {self.github['commit']}\"", "def test_commit_committer(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n committer = pygit2.Signature(\"Katherine\", \"[email protected]\")\n repository.commit(message=\"empty\", committer=committer)\n\n head = repository.head.commit\n assert (\n committer.name == head.committer.name\n and committer.email == head.committer.email\n )", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def svn_client_commit_info_t_author_set(svn_client_commit_info_t_self, char_author): # real signature unknown; restored from __doc__\n pass", "def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name", "def _trusted_commit(\n self, committer_id, commit_type, commit_message, commit_cmds):\n base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access\n self, committer_id, commit_type, commit_message, commit_cmds)\n\n topic_rights = MockTopicRightsModel.get_by_id(self.id)\n if topic_rights.topic_is_published:\n status = constants.ACTIVITY_STATUS_PUBLIC\n else:\n status = constants.ACTIVITY_STATUS_PRIVATE\n\n topic_models.TopicCommitLogEntryModel(\n id=('rights-%s-%s' % (self.id, self.version)),\n user_id=committer_id,\n topic_id=self.id,\n commit_type=commit_type,\n commit_message=commit_message,\n commit_cmds=commit_cmds,\n version=None,\n post_commit_status=status,\n post_commit_community_owned=False,\n post_commit_is_private=not topic_rights.topic_is_published\n ).put()", "def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? 
If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))" ]
[ "0.6614517", "0.5485047", "0.5467171", "0.54115015", "0.5338054", "0.5191764", "0.5172768", "0.5139828", "0.5138755", "0.5125162", "0.50731736", "0.5002091", "0.49640554", "0.49508968", "0.49492618", "0.49048898", "0.49002182", "0.4897812", "0.4882828", "0.4845588", "0.48252293", "0.48036435", "0.4801142", "0.47736818", "0.47500736", "0.4743845", "0.4734618", "0.4719037", "0.4700205", "0.46714467" ]
0.7368635
0
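A condensed, standalone version of the domain checks performed by the positive document above is sketched below. It keeps the same pre-2.0 dnspython call (`dns.resolver.query`) that the record uses — newer dnspython releases prefer `dns.resolver.resolve` — and the blocked-domain set is trimmed for illustration:

```python
import re

import dns.exception
import dns.name
import dns.resolver

BLOCKED_DOMAINS = {"localhost", "localhost.localdomain", "(none)"}  # trimmed for illustration


def email_domain_problem(email_address):
    """Return a short description of the problem, or None if the address looks deliverable."""
    extraction = re.match(r"^(\S+)@(\S+)$", email_address)
    if not extraction:
        return "seemingly invalid email address"
    domain = extraction.group(2)
    if domain in BLOCKED_DOMAINS:
        return "blocked domain"
    try:
        dns.resolver.query(domain, "MX")  # older dnspython API, as in the record above
    except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):
        # No MX record: fall back to checking for an A record
        try:
            dns.resolver.query(domain, "A")
        except (dns.resolver.NoAnswer, dns.exception.Timeout,
                dns.name.EmptyLabel, dns.resolver.NXDOMAIN):
            return "domain has no MX or A record"
    except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
        return "domain does not resolve"
    return None
```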
Helper function to construct an address header for emails as Python stuffs it up
def address_header(self, name, email): fixed_name = Header( name ).encode() return unicode("{0} <{1}>").format(fixed_name, email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_rfc2822_address_header(header_text):\n def encode_addr(addr):\n name, email = addr\n # If s is a <text string>, then charset is a hint specifying the\n # character set of the characters in the string. The Unicode string\n # will be encoded using the following charsets in order: us-ascii,\n # the charset hint, utf-8. The first character set to not provoke a\n # UnicodeError is used.\n # -> always pass a text string to Header\n\n # also Header.__str__ in Python 3 \"Returns an approximation of the\n # Header as a string, using an unlimited line length.\", the old one\n # was \"A synonym for Header.encode().\" so call encode() directly?\n name = Header(pycompat.to_text(name)).encode()\n # if the from does not follow the (name <addr>),* convention, we might\n # try to encode meaningless strings as address, as getaddresses is naive\n # note it would also fail on real addresses with non-ascii characters\n try:\n return formataddr((name, email))\n except UnicodeEncodeError:\n _logger.warning(_('Failed to encode the address %s\\n'\n 'from mail header:\\n%s') % (addr, header_text))\n return \"\"\n\n addresses = getaddresses([pycompat.to_text(ustr(header_text))])\n return COMMASPACE.join(a for a in (encode_addr(addr) for addr in addresses) if a)", "def add_header(self, header, value):\n if not (header and value):\n raise ValueError('Header not provided!')\n if header.lower() == 'date':\n return False\n recipients_headers = ['to', 'cc', 'bcc']\n if header.lower() in recipients_headers or header.lower() == 'from':\n if not isinstance(value, list):\n value = [value]\n header_value = []\n for addr in value:\n # For each address in the recipients headers\n # Do the Header Object\n # PY3 works fine with Header(values, charset='utf-8')\n # PY2:\n # - Does not escape correctly the unicode values\n # - Must encode the display name as a HEADER\n # so the item is encoded properly\n # - The encoded display name and the address are joined\n # into the Header of the email\n mail_addr = address.parse(addr)\n display_name = Header(\n mail_addr.display_name, charset='utf-8').encode()\n if display_name:\n # decode_header method in PY2 does not look for closed items\n # so a ' ' separator is required between items of a Header\n if PY2:\n base_addr = '{} <{}>'\n else:\n base_addr = '{}<{}>'\n header_value.append(\n base_addr.format(\n display_name,\n mail_addr.address\n ).strip()\n )\n else:\n header_value.append(mail_addr.address)\n header_value = ','.join(header_value)\n else:\n header_value = Header(value, charset='utf-8').encode()\n # Get correct header name or add the one provided if custom header key\n header = Email.fix_header_name(header) or header\n if header.lower() == 'bcc':\n result = []\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n self.bccs = header_value\n else:\n self.email[header] = header_value\n return header_value", "def header_email(strg):\n\taddr = email.utils.parseaddr(strg)\n\tif not addr[1]:\n\t\traise EmailMissed(strg)\n\treturn addr[1]", "def formataddr( pair, charset=None ):\n name, address = pair\n name = name and name.strip()\n address = address and address.strip()\n\n if not name:\n return address\n\n if _is8bitstring( name ):\n header = Header( '\"%s\"' % name, charset )\n header.append( ' <%s>' % address, '8bit' )\n return header\n\n quotes = ''\n if 
specialsre.search( name ):\n quotes = '\"'\n name = escapesre.sub( r'\\\\\\g<0>', name )\n\n return '%s%s%s <%s>' % ( quotes, name, quotes, address )", "def format_address(value):\n if type(value) in (tuple, list):\n return ', '.join([format_address(v) for v in value])\n name, addr = parseaddr(value)\n return formataddr((encode_header(name), addr.encode('ascii')))", "def build_address(record):\n pass", "def format_header(self, text: str, anchor: Optional[str] = None) -> str:", "def convert_address(self, addr_obj):\n return addr_obj.mailbox.decode() + '@' + addr_obj.host.decode()", "def format_address(**args):\n #Begin with the organisation and PO Box number, if applicable.\n address = ''.join([args[entry] + '\\n' \n for entry in ['organisation', 'PO box']\n if args.get(entry)])\n #Format building name/number components.\n address += format_building_components(*[args.get(x) for x in \n ['sub-building name', \n 'building name', \n 'building number',\n 'concatenation indicator']])\n #Add thoroughfare (if present), locality/town and postcode.\n address += ''.join([args[entry] + '\\n' \n for entry in ['dependent thoroughfare', \n 'thoroughfare',\n 'double dependent locality',\n 'dependent locality',\n 'town',\n 'postcode']\n if args.get(entry)])\n return address.strip()", "def header_format(header, value, form = DEFAULT_FORMAT):\n\tif header in HEADER_ADDRESS_FIELDS:\n\t\treturn header_email(value)\n\telif header == \"Date\":\n\t\tparsed = email.utils.parsedate(value)\n\t\tif parsed:\n\t\t\treturn time.strftime(form, parsed)\n\t\treturn \"\"\n\tif header == \"Message-ID\":\n\t\treturn email.utils.unquote(value)\n\treturn value[:DEFAULT_MAXLEN]", "def headers_add_host(headers, address):\n\n headers.setdefault('Host', address)\n\n return headers", "def get_email_details(header: str) -> dict:\n # this is one way to solve the exercise\n # result_keys = [\"from\", \"to\", \"subject\", \"date\"]\n # search_strings = [\n # r\"From\\:\\s(.*)\",\n # r\"To\\:\\s(.*)\",\n # r\"Subject\\:\\s(.*)\",\n # r\"Date\\:\\s(.*)\\s[+-]\",\n # ]\n # result_values = [re.search(s, EMAIL_HEADER).group(1) for s in search_strings]\n # print(dict(zip(result_keys, result_values)))\n\n # or we could use groupdict as suggested\n m = re.search(\n r\"From\\:\\s(?P<from>.*)\\n.*To\\:\\s(?P<to>.*)\\n.*Subject\\:\\s(?P<subject>.+?)\\n.*Date\\:\\s(?P<date>.*)\\s[+-]\",\n header,\n re.MULTILINE | re.DOTALL,\n )\n return m.groupdict() if m else None", "def _get_address(self, address1, address2):\n return f'{address1}\\n{address2}' if address2 else address1", "def other_mail_address(self):\n return (self.mail_address_2 + ' ' + \n self.mail_address_3 + ' ' +\n self.mail_address_4)", "def header_values(header, mail):\n\tif header not in mail.keys():\n\t\traise HeaderMissed(header)\n\tvalues = [header_decode(mail[header])]\n\tif header in HEADER_ADDRESS_FIELDS:\n\t\treturn [email.utils.formataddr(x) for x in email.utils.getaddresses(values)]\n\treturn values", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email 
Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to", "def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")", "def get_address_string(self):\n output = ''\n if self.address_line_1:\n output += '{}'.format(self.address_line_1)\n if self.address_line_2:\n output += ', {}'.format(self.address_line_2)\n if self.city:\n output += ', {}'.format(self.city)\n if self.state:\n output += ', {}'.format(self.state)\n if self.zipcode:\n output += ' {}'.format(self.zipcode)\n return output", "def rfc822_escape(header):\n lines = header.split('\\n')\n sep = '\\n' + 8 * ' '\n return sep.join(lines)", "def get_email_details(header: str) -> dict:\r\n try:\r\n m = re.match(\r\n r\"\"\"\r\n ([\\w\\W]* # remove lines \r\n (\r\n ^Date: \\s*(?P<date>[\\w\\W]{25}) # obtain date (\"date\")\r\n |^From: \\s*(?P<from>[\\w\\W]*?$) # obtain sender (\"from\")\r\n |^To: \\s*(?P<to>[\\w\\W]*?$) # obtain receiver (\"to\")\r\n |^Subject: \\s*(?P<subject>[\\w\\W]*?$) # obtain subject (\"subject\")\r\n )){4}\r\n \"\"\",\r\n header,\r\n re.VERBOSE | re.MULTILINE,\r\n )\r\n\r\n return m.groupdict()\r\n\r\n except:\r\n return None", "def email_f(x: Text) -> Tuple[Text, Text]:\n return \"uri\", \"email://{}\".format(x.lower())", "def header(self, header, default=None):\n result = []\n header_value = self.email.get(header, default)\n if header_value:\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n\n return header_value", "def BuildHeaderString (text):\r\n\r\n return t.BuildHeaderString (text)", "def street_address(self):\n\t\tif self.address2:\n\t\t\treturn '{}, {}'.format(self.address, self.address2)\n\t\treturn self.address", "def address_line_1(self):\n return \"{} {} {}\".format(\n self.fake.randomize_nb_elements(1000),\n self.fake.last_name(),\n self.fake.random_element(elements=STREET_SUFFIX)\n )", "def __addheader(self, msg, headername, headervalue):\n if self.__contains_nonascii_characters(headervalue):\n h = Header(headervalue, 'utf-8')\n msg[headername] = h\n else:\n msg[headername] = headervalue\n return msg", "def get_address(self):\n\n return \"{}\\n{}\\n{},\\n{},\\n{}\".format(\n self.address_line_1, self.city, self.state, self.postal_code, self.country\n )", "def generateSMSEmail(profile):\n if profile['carrier'] is None or not profile['phone_number']:\n return None\n\n return str(profile['phone_number']) + \"@\" + profile['carrier']", "def _get_source_address(course_id, course_title):\r\n course_title_no_quotes = re.sub(r'\"', '', course_title)\r\n\r\n # For the email address, get the course. 
Then make sure that it can be used\r\n # in an email address, by substituting a '_' anywhere a non-(ascii, period, or dash)\r\n # character appears.\r\n from_addr = u'\"{0}\" Course Staff <{1}-{2}>'.format(\r\n course_title_no_quotes,\r\n re.sub(r\"[^\\w.-]\", '_', course_id.course),\r\n settings.BULK_EMAIL_DEFAULT_FROM_EMAIL\r\n )\r\n return from_addr", "def decode_email_address(address, charset=\"utf8\"):\r\n name = decode_email_header(address[0])\r\n addr = address[1]\r\n addr = \"<\" + addr + \">\"\r\n if not name:\r\n return addr\r\n return name + \" \" + addr" ]
[ "0.69923276", "0.67900985", "0.67680126", "0.6404634", "0.6254018", "0.62328947", "0.615882", "0.61046", "0.6023781", "0.6022702", "0.5986854", "0.5960331", "0.59242094", "0.5905854", "0.5892792", "0.58512026", "0.58358437", "0.5810467", "0.57352096", "0.5722051", "0.57130855", "0.5709188", "0.56994635", "0.5692479", "0.56684", "0.5637921", "0.56051266", "0.56034654", "0.55964833", "0.55856055" ]
0.7850054
0
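The positive document in this last record is Python 2 (note the `unicode(...)` call). A Python 3 sketch of the same idea — the example name and address below are made up — is simply:

```python
from email.header import Header


def address_header(name, email_address):
    """Build a 'Display Name <addr>' value, RFC 2047-encoding the display name if needed."""
    fixed_name = Header(name).encode()
    return "{0} <{1}>".format(fixed_name, email_address)


print(address_header("Jürgen Müller", "jurgen@example.org"))
# Non-ASCII names come out as RFC 2047 encoded-words, e.g. '=?utf-8?...?= <jurgen@example.org>'
```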