| query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Convert a float to 32bit integer
|
def float_to_int_32(x):
    return np.float32(x).view(np.int32)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_int32(f):\n from numpy import array, clip\n\n img = array(clip(f,-2147483647,2147483647)).astype('i')\n return img",
"def to_float32(n):\n return np.cast[\"float32\"](n)",
"def bin_to_float32(b):\n bf = int_to_bytes(int(b, 2), 4) # 4 bytes needed for IEEE 754 binary32.\n return struct.unpack(\">f\", bf)[0]",
"def float_to_bin32(value):\n [d] = struct.unpack(\">L\", struct.pack(\">f\", value))\n return \"{:032b}\".format(d)",
"def unpack(self,f32):\n a = struct.pack('>f',f32)\n b = binascii.hexlify(a)\n return int(b)",
"def read_float32(self):\n return self.read(BitTypes.FLOAT_LE_32.value)",
"def float_to_int_16(x):\n return np.float16(x).view(np.int16)",
"def ts_float32(val):\n return np.float64(val)",
"def float2int(img_float):\n img = img_float * (MAX_VALUE - 1)\n img = img.astype(int)\n return img",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent",
"def f2i(f):\n return struct.unpack('i', struct.pack('f', f))[0]",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent",
"def float32_to_float8e4m3( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = True,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n if not fn:\n raise NotImplementedError(\n \"float32_to_float8e4m3 not implemented with fn=False.\"\n )\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n if uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if np.isinf(x):\n if saturate:\n return ret | 127\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 116:\n pass\n elif e < 120:\n # denormalized number\n ex = e - 119\n if ex >= -2:\n ret |= 1 << (2 + ex)\n ret |= m >> (21 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (20 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 135:\n # normalized number\n ex = e - 119 # 127 - 8\n if ex == 0:\n ret |= 0x4\n ret |= m >> 21\n else:\n ret |= ex << 3\n ret |= m >> 20\n if m & 0x80000 and ((m & 0x100000) or (m & 0x7FFFF)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n return 0x80\n elif saturate:\n ret |= 0x7F # 01111110\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n else:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return ret | 126\n return 0x7F | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 117:\n pass\n elif e < 121:\n # denormalized number\n ex = e - 120\n if ex >= -2:\n ret |= 1 << (2 + ex)\n ret |= m >> (21 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (20 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 136:\n # normalized number\n ex = e - 120\n if ex == 0:\n ret |= 0x4\n ret |= m >> 21\n else:\n ret |= ex << 3\n ret |= m >> 20\n if (ret & 0x7F) == 0x7F:\n ret &= 0xFE\n if (m & 0x80000) and ((m & 0x100000) or (m & 0x7FFFF)):\n if (ret & 0x7F) < 0x7E:\n # rounding\n ret += 1\n elif not saturate:\n ret |= 0x7F\n elif saturate:\n ret |= 126 # 01111110\n else:\n ret |= 0x7F\n return int(ret)",
"def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")",
"def _real_to_int(d):\n\n if d < 0:\n sign = 0x8000000000000000\n else:\n sign = 0\n\n exponent = log(d, 16)\n if (exponent < 0):\n exponent = ceil(exponent)\n else: # exponent > 0\n exponent = floor(exponent) + 1\n d = d / (16 ** exponent)\n\n mantissa = getMantissa(d)\n\n return sign | (int(exponent) + 64) << 56 | mantissa #updated for Python2 compatibility\n #return sign | (exponent + 64) << 56 | mantissa",
"def to_float32(elem):\n return elem.astype(np.float32)",
"def int32_t(n):\n return int(n).to_bytes(4, byteorder='little', signed=True)",
"def int_to_float(num_bits: int, value: int) -> float:\n if num_bits == 32:\n unpack_fmt = '>f'\n elif num_bits == 64:\n unpack_fmt = '>d'\n else:\n raise Exception(f\"Unhandled bit size: {num_bits}\")\n\n return struct.unpack(unpack_fmt, value.to_bytes(num_bits // 8, 'big'))[0]",
"def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data",
"def float_to_int(x, prec=64):\n if prec == 16: return float_to_int_16(x)\n elif prec == 32: return float_to_int_32(x)\n elif prec == 64: return float_to_int_64(x)\n else: raise ValueError",
"def float_to_int_64(x):\n return np.float64(x).view(np.int64)",
"def test_convert_float16_to_float32(in_dtype):\n check_type_supported(in_dtype)\n\n f16_input = torch.tensor(range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=torch.int16).view(in_dtype)\n f32_output = convert_float_to_float32(f16_input)\n\n nan = f16_input.isnan()\n assert torch.all(f32_output[nan].isnan())\n inf = f16_input.isinf()\n assert torch.all(f32_output[inf].isinf())\n other = torch.logical_not(torch.logical_or(nan, inf))\n assert torch.all(f16_input[other] == f32_output[other])",
"def convert_to_fp32(tensor):\n\n def _convert_to_fp32(tensor):\n return tensor.float()\n\n def _is_fp16_bf16_tensor(tensor):\n return hasattr(tensor, \"dtype\") and tensor.dtype in (torch.float16, torch.bfloat16)\n\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)",
"def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)",
"def bfloat16_to_float32(tensor):\n if tensor.dtype == tf.bfloat16:\n return tf.cast(tensor, dtype=tf.float32)\n else:\n return tensor",
"def _convert_samples_to_float32(samples):\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= 1.0 / 2 ** (bits - 1)\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples",
"def _convert_samples_to_float32(samples):\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= (1. / 2 ** (bits - 1))\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples",
"def _bytes_to_int32(b):\n\n\treturn b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)",
"def int_to_float(val, flt_type):\n if flt_type is not np.longdouble:\n return flt_type(val)\n # The following works around a nasty numpy 1.4.1 bug such that:\n # >>> int(np.uint32(2**32-1)\n # -1\n if not isinstance(val, Integral):\n val = int(str(val))\n faval = np.longdouble(0)\n while val != 0:\n f64 = np.float64(val)\n faval += f64\n val -= int(f64)\n return faval",
"def u32(value: bytes, endian: str = \"little\", sign: bool = False) -> int:\n return unpack(value, 32, endian, sign)"
] |
[
"0.7797767",
"0.7367003",
"0.73220885",
"0.72220695",
"0.6944232",
"0.6939441",
"0.6875976",
"0.6756007",
"0.6687747",
"0.6678737",
"0.66762525",
"0.66639346",
"0.666378",
"0.6621309",
"0.6610098",
"0.6544187",
"0.6530338",
"0.6523839",
"0.64904106",
"0.64755565",
"0.63935524",
"0.63831264",
"0.6337633",
"0.6313026",
"0.6280322",
"0.62734926",
"0.6265217",
"0.6224127",
"0.62107205",
"0.6207839"
] |
0.8072008
|
0
|
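A minimal usage sketch for the `float_to_int_32` document in the row above, assuming NumPy is imported as `np` (the snippet itself omits the import). `.view(np.int32)` reinterprets the raw IEEE 754 binary32 bits rather than rounding or clipping the value, which is what separates it from negatives such as `to_int32`; the expected patterns below are standard IEEE 754 facts.

```python
import numpy as np

def float_to_int_32(x):
    # Reinterpret the float32 bit pattern as an int32 (no rounding or clipping).
    return np.float32(x).view(np.int32)

# 1.0 in IEEE 754 binary32 is 0x3F800000.
assert int(float_to_int_32(1.0)) == 0x3F800000
# -2.0 sets the sign bit: the unsigned pattern is 0xC0000000.
assert (int(float_to_int_32(-2.0)) & 0xFFFFFFFF) == 0xC0000000
```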
Convert a float to 64bit integer
|
def float_to_int_64(x):
    return np.float64(x).view(np.int64)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _convert_int_to_i64(val):\n if val > 0x7FFFFFFFFFFFFFFF:\n val -= 0x10000000000000000\n return val",
"def bin_to_float64(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack(\">d\", bf)[0]",
"def float_to_bin64(value):\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return \"{:064b}\".format(d)",
"def int64_t(n):\n return int(n).to_bytes(8, byteorder='little', signed=True)",
"def _decimal_to_int64(decimal: Decimal) -> int:\n return int(f\"{decimal:0f}\".replace(\".\", \"\"))",
"def f2i(f):\n return struct.unpack('i', struct.pack('f', f))[0]",
"def _real_to_int(d):\n\n if d < 0:\n sign = 0x8000000000000000\n else:\n sign = 0\n\n exponent = log(d, 16)\n if (exponent < 0):\n exponent = ceil(exponent)\n else: # exponent > 0\n exponent = floor(exponent) + 1\n d = d / (16 ** exponent)\n\n mantissa = getMantissa(d)\n\n return sign | (int(exponent) + 64) << 56 | mantissa #updated for Python2 compatibility\n #return sign | (exponent + 64) << 56 | mantissa",
"def u64(value: bytes, endian: str = \"little\", sign: bool = False) -> int:\n return unpack(value, 64, endian, sign)",
"def to_int32(f):\n from numpy import array, clip\n\n img = array(clip(f,-2147483647,2147483647)).astype('i')\n return img",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent",
"def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")",
"def int_to_float(val, flt_type):\n if flt_type is not np.longdouble:\n return flt_type(val)\n # The following works around a nasty numpy 1.4.1 bug such that:\n # >>> int(np.uint32(2**32-1)\n # -1\n if not isinstance(val, Integral):\n val = int(str(val))\n faval = np.longdouble(0)\n while val != 0:\n f64 = np.float64(val)\n faval += f64\n val -= int(f64)\n return faval",
"def uint64_t(n):\n return int(n).to_bytes(8, byteorder='little', signed=False)",
"def float2int(img_float):\n img = img_float * (MAX_VALUE - 1)\n img = img.astype(int)\n return img",
"def int_r(f):\n return int(np.round(f))",
"def float_to_binary(x, n=64):\n return _fix_sign(int_to_binary(float_to_int(x, n), n))",
"def extract_64timestamp_fraction(bits: str) -> str:\n assert len(bits) == 64\n bits = bits[32:64]\n # __log.info(bits)\n ints = int(bits, 2)\n result = ints / _max_32bit\n result = int(result * 1000000000)\n result = str(result)\n while len(result) < 9:\n result = '0' + result\n return result",
"def base2int(self, float_number):\r\n return int(round(float_number * self.mult_base))",
"def ts_float32(val):\n return np.float64(val)",
"def int2float(img_int):\n img = img_int.astype(np.float64)\n img /= (MAX_VALUE - 1)\n return img",
"def bin_to_float(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack('>d', bf)[0]",
"def float_to_byte(val):\n return int(val*255)",
"def int_to_float(num_bits: int, value: int) -> float:\n if num_bits == 32:\n unpack_fmt = '>f'\n elif num_bits == 64:\n unpack_fmt = '>d'\n else:\n raise Exception(f\"Unhandled bit size: {num_bits}\")\n\n return struct.unpack(unpack_fmt, value.to_bytes(num_bits // 8, 'big'))[0]",
"def bit_to_long(bits: str) -> Decimal:\n ints = int(bits, 2)\n result = Decimal(ints) / Decimal(_max_32bit)\n return result",
"def _B64ToNum(b64):\r\n return number.bytes_to_long(base64.urlsafe_b64decode(b64))",
"def ReadInt64(self, endian=\"<\"):\n return self.unpack('%sq' % endian, 8)",
"def decode_i64(as_bytes: typing.List[int]) -> int:\n return le_bytes_to_int(as_bytes, True)",
"def float_to_int_32(x):\n return np.float32(x).view(np.int32)",
"def float_to_bin(value): # For testing.\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return '{:064b}'.format(d)"
] |
[
"0.6942918",
"0.6889584",
"0.669873",
"0.6556856",
"0.6440175",
"0.63891",
"0.6360378",
"0.6333624",
"0.62157387",
"0.61884594",
"0.61860836",
"0.61694336",
"0.61260474",
"0.6114865",
"0.60392386",
"0.59791774",
"0.59192514",
"0.5894881",
"0.5893504",
"0.5890302",
"0.586931",
"0.5839216",
"0.58353966",
"0.5815416",
"0.58045185",
"0.5792012",
"0.5784083",
"0.576272",
"0.574897",
"0.57271224"
] |
0.72442496
|
0
|
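The 64-bit counterpart works the same way. A sketch assuming NumPy, with a `struct`-based reinterpretation added here purely as a cross-check:

```python
import struct
import numpy as np

def float_to_int_64(x):
    # Reinterpret the float64 bit pattern as an int64.
    return np.float64(x).view(np.int64)

# 1.0 in IEEE 754 binary64 is 0x3FF0000000000000.
assert int(float_to_int_64(1.0)) == 0x3FF0000000000000
# The same reinterpretation expressed with struct.
assert struct.unpack("<q", struct.pack("<d", 1.0))[0] == int(float_to_int_64(1.0))
```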
Convert a float to integer with precision prec
|
def float_to_int(x, prec=64):
    if prec == 16: return float_to_int_16(x)
    elif prec == 32: return float_to_int_32(x)
    elif prec == 64: return float_to_int_64(x)
    else: raise ValueError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _float2int(x: float) -> int:\n return round(x * 100)",
"def set_num_precision(number, precision, mode='int'):\n fmt = '{:.%ie}' % (precision - 1)\n value = float(fmt.format(number))\n if mode == 'int':\n return int(value)\n else:\n return value",
"def int_r(f):\n return int(np.round(f))",
"def base2int(self, float_number):\r\n return int(round(float_number * self.mult_base))",
"def quantize_float(f, q):\n return int(round(f / q) * q)",
"def float_to_fp(x, precision='single'):\n\n # Zero\n if x == 0:\n return 0\n\n # Inf\n if math.isinf(x):\n s = '0' if x > 0 else '1'\n return int(s + '1' * _Exponent_bits[precision] + '0' * _Fraction_bits[precision], 2)\n\n # NaN\n if math.isnan(x):\n return int('0' + '1' * _Exponent_bits[precision] + '1' * _Fraction_bits[precision], 2)\n\n if not float_in_range(x, precision):\n raise ValueError(\"Value out of range for precision\")\n\n # Get exponent and upper fraction\n l = abs(int(x)) # TODO check abs()\n f_upper = bin(l)[3:] # remove 0b1 (includes leading 1 implied in fp)\n e = bin(len(f_upper) + _Bias[precision])[2:2 + _Exponent_bits[precision]]\n\n # Get lower fraction\n r = abs(x) - l # TODO check abs()\n fraction_bits = len(f_upper)\n f_lower = ''\n while r != 0.0 and fraction_bits <= _Fraction_bits[precision]:\n r *= 2\n fraction_bits += 1\n f_lower = f_lower + str(int(r))\n r -= int(r)\n\n # Get sign and join\n sign = '1' if x < 0 else '0'\n res = zfill_right(sign + e + f_upper + f_lower, _Bitwidth[precision])\n return int(res, 2)",
"def float2int(value_float, currency):\r\n if currency in \"BTC LTC NMC\":\r\n return int(round(value_float * 100000000))\r\n elif currency in \"JPY SEK\":\r\n return int(round(value_float * 1000))\r\n else:\r\n return int(round(value_float * 100000))",
"def INT(val):\n return math.floor(val)",
"def float2int(img_float):\n img = img_float * (MAX_VALUE - 1)\n img = img.astype(int)\n return img",
"def iceil(f: SupportsFloat) -> int:\n\t\t# noinspection PyTypeChecker\n\t\treturn int(np.ceil(f))",
"def float_round(x, prec=2, base=.05):\n return round(base * round(float(x) / base), prec)",
"def _int2float(x: int) -> float:\n return round(x / 100, 2)",
"def whole_number_to_int(value: AnyBasicType) -> AnyBasicType:\n if isinstance(value, float) and value.is_integer():\n return int(value)\n return value",
"def fp_to_float(fp, precision='single'):\n\n if precision not in ('half', 'single', 'double', 'quad'):\n raise ValueError(\"Precision must be one of 'half', 'single', 'double', or 'quad\")\n if not isinstance(fp, int):\n raise TypeError(\"fp must be an integer\")\n\n fp = bin(fp)[2:].zfill(_Bitwidth[precision])\n s = fp[0]\n e = fp[1:1 + _Exponent_bits[precision]]\n f = fp[1 + _Exponent_bits[precision]:]\n\n if e == '0' * _Exponent_bits[precision]:\n if f == '0' * _Fraction_bits[precision]:\n return 0.0\n else:\n raise ValueError(\"Subnormal number not supported\")\n elif e == '1' * _Exponent_bits[precision]:\n if f == '0' * _Fraction_bits[precision]:\n return math.inf if s == '0' else -math.inf\n else:\n # Or float('nan') (Using math.nan permits object comparision, i.e. x is math.nan)\n return math.nan\n\n ev = 2 ** (int(e, 2) - _Bias[precision])\n fv = 1 + (int(f, 2) / 2 ** _Fraction_bits[precision])\n v = ev * fv\n return v if s == '0' else -v",
"def __int__(self) -> int:\n return self._translate_in_type(int, self.float_num)",
"def convert_integer_price(decimal_price):\n return int(float(decimal_price) * 100)",
"def toint(number):\n if isinstance(number, float):\n if number > 1:\n number = round(number, 0)\n else:\n # The following solves when image has small dimensions (like 1x54)\n # then scale factor 1 * 0.296296 and `number` will store `0`\n # that will later raise ZeroDivisionError.\n number = round(math.ceil(number), 0)\n return int(number)",
"def get_precision(TP, FP):\n precision = TP / (TP + FP)\n return precision",
"def base2float(self, int_number):\r\n return float(int_number) / self.mult_base",
"def __float__(self) -> float:\n return self._translate_in_type(float, self.integer)",
"def _calculate_precision(interval_value: int) -> int:\n # log10(interval_value) + 1 is equivalent to len(str(interval_value)), but is significantly\n # faster and more memory-efficient\n if interval_value == 0:\n return 0\n if interval_value < 0:\n raise ValueError(\n f\"Expecting value to be a non-negative integer, got {interval_value}\"\n )\n return int(math.log10(interval_value)) + 1",
"def precision(self):\n return float(self.tp) / (self.tp + self.fp) if self.fp != 0 else 1",
"def scaled_float(int_val, scale):\n assert isinstance(int_val, int)\n unscaled = decimal.Decimal(int_val)\n scaled = unscaled.scaleb(-scale)\n float_val = float(scaled)\n return float_val",
"def _decimal_to_int64(decimal: Decimal) -> int:\n return int(f\"{decimal:0f}\".replace(\".\", \"\"))",
"def quote2int(self, float_number):\r\n return int(round(float_number * self.mult_quote))",
"def __round(num):\n return float(round(decimal.Decimal(num), DataGen.precision))",
"def rint(flt: float) -> int | float:\n return int(rounded) if (rounded := round(flt, 2)).is_integer() else rounded",
"def ir(some_value):\r\n return int(round(some_value))",
"def round(n, precision=DEFAULT_ERROR):\n n = parse_number(n)\n sign = -1 if n < 0 else 1\n\n base = int(n)\n if abs(n) + precision >= abs(base) + 1:\n return sign * (abs(base) + 1)\n elif abs(n) - precision <= abs(base):\n return base\n else:\n return n",
"def decimal_precision_from_scientific_notation(decimal_value: Decimal) -> int:\n return strict_integer_validator(\n None,\n -decimal_value.log10()\n )"
] |
[
"0.7167975",
"0.6879536",
"0.6711734",
"0.6599057",
"0.64479166",
"0.63786715",
"0.6356254",
"0.62676",
"0.6264848",
"0.6208819",
"0.6200068",
"0.6175348",
"0.61597306",
"0.6158532",
"0.6128278",
"0.61098605",
"0.60939324",
"0.6079625",
"0.60612583",
"0.6056072",
"0.60408086",
"0.60255015",
"0.6021184",
"0.59994566",
"0.59839153",
"0.59773654",
"0.5960996",
"0.5893567",
"0.5883149",
"0.588083"
] |
0.77927786
|
0
|
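The dispatcher above relies on the 16/32/64-bit helpers that appear as separate documents in this table. A self-contained sketch, restating those helpers under the assumption that they are the NumPy `.view`-based versions shown elsewhere in this dump:

```python
import numpy as np

def float_to_int_16(x):
    return np.float16(x).view(np.int16)

def float_to_int_32(x):
    return np.float32(x).view(np.int32)

def float_to_int_64(x):
    return np.float64(x).view(np.int64)

def float_to_int(x, prec=64):
    # Dispatch on the requested precision; any other value is an error.
    if prec == 16: return float_to_int_16(x)
    elif prec == 32: return float_to_int_32(x)
    elif prec == 64: return float_to_int_64(x)
    else: raise ValueError

# 1.0 in IEEE 754 binary16 / binary32 / binary64.
assert int(float_to_int(1.0, 16)) == 0x3C00
assert int(float_to_int(1.0, 32)) == 0x3F800000
assert int(float_to_int(1.0, 64)) == 0x3FF0000000000000
```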
Convert an integer to an nbit binary number
|
def int_to_binary(x, n=64):
    return format(x, 'b').zfill(n)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def intToBinary(x, N):\n return (\"{0:0\" + str(N) + \"b}\").format(x)",
"def int2bin(n: int) -> str:",
"def int2bin(n, bits=13):\n return \"\".join([str((n >> y) & 1) for y in range(bits - 1, -1, -1)])",
"def num_to_binary(n):\n if n == 0:\n return ''\n elif n % 2 == 1:\n return num_to_binary(n // 2) + '1'\n else:\n return num_to_binary(n // 2) + '0'",
"def convert_to_binary(num):\n return '{0:b}'.format(num)",
"def int2bin(i):\n if i == 0: return \"0\"\n s = ''\n while i:\n if i & 1 == 1:\n s = \"1\" + s\n else:\n s = \"0\" + s\n i /= 2\n return s",
"def int2bin(n, count=8):\n return \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])",
"def binbits(x, n):\n bits = bin(x).split('b')[1]\n if len(bits) < n:\n ans = '0' * (n - len(bits)) + bits\n else:\n ans = bits\n\n return ans",
"def int2bin(n, count=24):\n return \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])",
"def decimal2binary(self, n):\n\n octet = [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"]\n index = 0\n if n < 0 or n > 255:\n raise ValueError, \"Octet value must be between [0-255]\"\n if n == 0: \n return \"\".join(octet)\n while n > 0:\n octet[index] = str((n % 2))\n index += 1\n n = n >> 1\n octet.reverse()\n return \"\".join(octet)",
"def int_to_bin(n, width = 24):\r\n return ''.join([str((n >> y) & 1) for y in range(width-1, -1, -1)])",
"def binary(n):\n # handles negative numbers\n negative = False\n if n < 0:\n negative = True\n n = abs(n)\n\n # divide n by 2 while n != 0, append remainder of division to array\n number = []\n while n != 0:\n number.append(n % 2)\n n //= 2\n\n # return binary number as integer\n bin_number = 0\n mult = 10 ** (len(number) - 1)\n n_len = len(number)\n for i in range(n_len - 1, -1, -1):\n bin_number += (number[i] * mult)\n mult //= 10\n return bin_number if not negative else -bin_number",
"def _bits(num):\r\n return bin(int(num))[2:]",
"def DecimalToBinary(n):\n return bin(n)[2:]",
"def numToBin(string):\n return text_to_bits(str(int(bitShift(string)) % 1000))",
"def bits_to_bytes(n: int) -> int:\n return _round_bits(n, 8)",
"def make_bitstring(num):\n return bin(num)[2:]",
"def binary(n):\n a=str(n)\n bin=\"\"\n while n>=1:\n bin+=str(int(n%2))\n n=n//2\n bin=bin[len(bin)-1:-0:-1]+bin[0]\n for ele in bin:\n if ele!=0:\n index=bin.find(ele)\n break\n return bin",
"def to_bit(number):\n if number in range (256):\n binary = bin(number)[2::]\n return '0'*(8-len(binary)) + binary\n return '-1'",
"def octet(n):\n\t\n\treturn n & 0b11111111",
"def mk_bin(num):\n num = int(num) #convert to integer\n bnum = format(num, 'b').zfill(16) #put number in binary, pad with 0s\n return bnum",
"def get_bin(x, n=0):\n return format(x, 'b').zfill(n)",
"def integer_to_binary(x, n_bits=N_BITS):\n bit_list = [0] * n_bits\n for i in range(n_bits-1, -1, -1):\n div = x // (2**i)\n mod = x % (2**i)\n bit_list[i] = (div > 0) * 1\n x = mod\n return bit_list[::-1]",
"def binary(num):\n binary = \"\"\n \n while num > 0:\n bit = num%2\n binary = str(bit) + binary # on rajoute le bit au nombre en binaire mais à la fin parce que comme ça ça inverse l'ordre\n num = num//2\n\n return binary",
"def decint2binstr(n):\n if n < 0:\n return '-' + decint2binstr(-n)\n s = ''\n while n != 0:\n s = str(n % 2) + s\n n >>= 1\n return s or '0'",
"def dec_to_bin(n, digits):\n if(n<0) :\n sys.stderr.write( \"warning, negative n not expected\\n\")\n pass\n i = digits-1\n ans = \"\"\n while i >= 0 :\n b = (((1<<i)&n)>0) \n i -= 1\n ans = ans + str(int(b))\n return ans",
"def uint82bin(n, count=8):\n return \"\".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])",
"def dec2bin(n, ln=None):\n n = int(n)\n bStr = \"\"\n\n if n < 0:\n raise ValueError(\"must be a positive integer.\")\n # if n == 0: return '0'\n while n > 0:\n bStr = str(n % 2) + bStr\n n = n >> 1\n if not ln:\n l = len(bStr)\n else:\n l = ln\n b = \"0\" * (l - len(bStr)) + bStr\n return b",
"def bin_str(i):\n out = ''\n for j in range(N-1,-1,-1):\n if (i>>j) & 1 == 1:\n out += '1'\n else:\n out += '0'\n return out",
"def uint82bin(n, count=8):\n return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])"
] |
[
"0.8351496",
"0.8306802",
"0.7986127",
"0.79528177",
"0.78577054",
"0.7826324",
"0.78063375",
"0.7724613",
"0.7661041",
"0.7647572",
"0.7646287",
"0.7626045",
"0.7545623",
"0.74988794",
"0.74691707",
"0.74666137",
"0.7465818",
"0.7416674",
"0.7400173",
"0.7377615",
"0.7353318",
"0.73483825",
"0.7285091",
"0.7284718",
"0.7231474",
"0.72233534",
"0.7203214",
"0.719895",
"0.7195513",
"0.71861976"
] |
0.84325266
|
0
|
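`format(x, 'b').zfill(n)` left-pads the binary digits with zeros; note that for a negative integer the minus sign survives `zfill`, which is presumably why the companion `float_to_binary` document routes its result through `_fix_sign`. A short sketch:

```python
def int_to_binary(x, n=64):
    # Binary digits of x, left-padded with zeros to width n.
    return format(x, 'b').zfill(n)

assert int_to_binary(5, 8) == "00000101"
assert len(int_to_binary(1)) == 64
# A negative input keeps its minus sign: zfill pads after the '-'.
assert int_to_binary(-5, 8) == "-0000101"
```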
Convert a float to an nbit binary number
|
def float_to_binary(x, n=64):
    return _fix_sign(int_to_binary(float_to_int(x, n), n))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def float_to_bin(value): # For testing.\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return '{:064b}'.format(d)",
"def float_to_bin (x):\n assert type (x) is float\n s_hex = float.hex (x)\n hex_parts = RE_FLOAT_HEX_PARTS.match (s_hex)\n assert hex_parts\n \n s = hex_parts.group ('sign')\n m = hex_parts.group ('mantissa')\n se = hex_parts.group ('signexp')\n e = hex_parts.group ('exp')\n \n # Mantissa, including sign bit\n # See also: http://stackoverflow.com/questions/1425493/convert-hex-to-binary\n s_bin = '['\n if s:\n s_bin += s\n if (int (m, 16) == 0) and (int (e) == 0):\n s_bin += \"0\"\n else:\n s_bin += \"1\"\n s_bin += \\\n \".\" \\\n + bin (int (m, 16))[2:].zfill (4 * len (m)) \\\n + \"]_{2}\"\n \n # Sign of exponent\n s_bin += \"e\" + se\n \n # Exponent\n s_bin += e\n\n return s_bin",
"def convert_to_binary(num):\n return '{0:b}'.format(num)",
"def int_to_binary(x, n=64):\n return format(x, 'b').zfill(n)",
"def DecimalToBinary(n):\n return bin(n)[2:]",
"def frac_bin(f, n=32):\n f -= math.floor(f) # get only the fractional part\n f *= 2**n # shift left\n f = int(f) # truncate the rest of the fractional content\n return f",
"def intToBinary(x, N):\n return (\"{0:0\" + str(N) + \"b}\").format(x)",
"def float_to_bin32(value):\n [d] = struct.unpack(\">L\", struct.pack(\">f\", value))\n return \"{:032b}\".format(d)",
"def bitfield(i,N): \n bits = bitstr(i,N)\n bits = [int(digit)*2-1 for digit in bits]\n return np.array(bits).astype(float)",
"def float_to_bin64(value):\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return \"{:064b}\".format(d)",
"def test_real_to_binary_single_precision(self):\n number = 3.14159265358979323846264338327950288419716939937510582097494\n result = utils.real_to_binary(number)\n expected_result = (\n '01000000010010010000111111011011'\n )\n self.assertEqual(result, expected_result)",
"def decimal2binary(self, n):\n\n octet = [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"]\n index = 0\n if n < 0 or n > 255:\n raise ValueError, \"Octet value must be between [0-255]\"\n if n == 0: \n return \"\".join(octet)\n while n > 0:\n octet[index] = str((n % 2))\n index += 1\n n = n >> 1\n octet.reverse()\n return \"\".join(octet)",
"def decimal_binary(num):\n\treturn \"{:08b}\".format(num)",
"def float_to_byte(val):\n return int(val*255)",
"def bits_to_bytes(n: int) -> int:\n return _round_bits(n, 8)",
"def num_to_binary(n):\n if n == 0:\n return ''\n elif n % 2 == 1:\n return num_to_binary(n // 2) + '1'\n else:\n return num_to_binary(n // 2) + '0'",
"def gray_to_binary(self, num):\n mask = num >> 1\n\n while (mask != 0):\n num = num ^ mask\n mask = mask >> 1\n\n return num",
"def _get_binary(value, bits):\n\n # http://www.daniweb.com/code/snippet216539.html\n return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])",
"def binary(n):\n # handles negative numbers\n negative = False\n if n < 0:\n negative = True\n n = abs(n)\n\n # divide n by 2 while n != 0, append remainder of division to array\n number = []\n while n != 0:\n number.append(n % 2)\n n //= 2\n\n # return binary number as integer\n bin_number = 0\n mult = 10 ** (len(number) - 1)\n n_len = len(number)\n for i in range(n_len - 1, -1, -1):\n bin_number += (number[i] * mult)\n mult //= 10\n return bin_number if not negative else -bin_number",
"def to_binary_string(x):\n return \"{0:b}\".format(x)",
"def get_bin(x, n=0):\n return format(x, 'b').zfill(n)",
"def _bin_backport(x):\n chars = []\n for n in range(7, -1, -1):\n y = x - 2**n\n if y >= 0:\n chars.append('1')\n x = y\n else:\n chars.append('0')\n return ''.join(chars)",
"def _bits(num):\r\n return bin(int(num))[2:]",
"def makeBinary(self):\r\n\t\tls = 5.12 #limite superior\r\n\t\tli = -5.12 #limite inferior\r\n\t\tt = 14 # total de binarios\r\n\t\t\r\n\t\tcadena_bits = \"\"\r\n\t\tfor i in self.values:\r\n\t\t\tentero = (int) ( ( ( i - li ) * ( 2 ** t ) ) / ( ls - li ) )\r\n\t\t\t#print entero\r\n\t\t\tcadena_bits += \"{0:b}\".format(entero).zfill(14)\r\n\t\t\t\r\n\t\tself.cadenaBits = cadena_bits\r\n\t\treturn cadena_bits",
"def decimals_to_binary(decimals, n_bits):\n decimals = np.array(decimals, int)\n if decimals.ndim != 1 or (decimals < 0).any():\n raise ValueError('decimals must be 1D with all nonnegative values')\n n_bits = np.array(n_bits, int)\n if decimals.shape != n_bits.shape:\n raise ValueError('n_bits must have same shape as decimals')\n if (n_bits <= 0).any():\n raise ValueError('all n_bits must be positive')\n binary = list()\n for d, b in zip(decimals, n_bits):\n if d > 2 ** b - 1:\n raise ValueError('cannot convert number {0} using {1} bits'\n ''.format(d, b))\n binary.extend([int(bb) for bb in np.binary_repr(d, b)])\n assert len(binary) == n_bits.sum() # make sure we didn't do something dumb\n return binary",
"def float_to_bytes(value):\n return struct.pack(\"f\", value)",
"def binbits(x, n):\n bits = bin(x).split('b')[1]\n if len(bits) < n:\n ans = '0' * (n - len(bits)) + bits\n else:\n ans = bits\n\n return ans",
"def freq2bin(f, srate, N):\n return (f / float(srate)) * N",
"def to_bit(number):\n if number in range (256):\n binary = bin(number)[2::]\n return '0'*(8-len(binary)) + binary\n return '-1'",
"def octet(n):\n\t\n\treturn n & 0b11111111"
] |
[
"0.7727614",
"0.7064918",
"0.6937852",
"0.6915586",
"0.68823963",
"0.68155485",
"0.6747921",
"0.6724134",
"0.67205435",
"0.66596603",
"0.66441965",
"0.66304857",
"0.6585514",
"0.65364355",
"0.6523004",
"0.6493496",
"0.6451158",
"0.63342595",
"0.63248837",
"0.63212866",
"0.6301936",
"0.62841403",
"0.62668645",
"0.6250267",
"0.623378",
"0.6229779",
"0.6229223",
"0.62199664",
"0.6212794",
"0.61981547"
] |
0.81579816
|
0
|
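`float_to_binary` composes the two previous documents and then applies `_fix_sign`, which is not shown in this excerpt. For a non-negative input the int64 view is already non-negative, so the bare composition reproduces a `struct`-based bit dump; the sketch below checks exactly that case and deliberately leaves the negative-sign handling to the unshown helper.

```python
import struct
import numpy as np

def float_to_int_64(x):
    return np.float64(x).view(np.int64)

def int_to_binary(x, n=64):
    return format(x, 'b').zfill(n)

# For a non-negative float the int64 view is non-negative, so no sign fix-up is
# needed and the composition already yields the 64-bit IEEE 754 pattern of 1.0.
pattern = int_to_binary(int(float_to_int_64(1.0)), 64)
reference = "{:064b}".format(struct.unpack(">Q", struct.pack(">d", 1.0))[0])
assert pattern == reference == "00" + "1" * 10 + "0" * 52
```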
Convert each float in a pandas.dataframe column to binary representation.
|
def row_to_binary(row):
    binaryrow = ''
    for item, val in row.iteritems():
        binaryrow += float_to_binary(val)
    return binaryrow
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_binary(df, variable_names):\n recoded_df = df.copy()\n recoded_df[variable_names] = (\n recoded_df[variable_names]\n .astype(bool)\n .astype(\"int64\")\n )\n return recoded_df",
"def binary_encoding(df, bin_cols):\n for col in bin_cols:\n enc = BinaryEncoder(cols=col)\n bin_enc = enc.fit_transform(df[col])\n df = pd.concat([df, bin_enc], axis=1)\n df.drop(col, axis=1, inplace=True)\n return df",
"def get_binary_values(data_frame):\n all_columns = pandas.DataFrame( index = data_frame.index)\n for col in data_frame.columns:\n data = pandas.get_dummies(data_frame[col], prefix=col.encode('ascii', 'replace'))\n all_columns = pandas.concat([all_columns, data], axis=1)\n return all_columns",
"def get_binary_values(data_frame):\n all_columns = pandas.DataFrame( index = data_frame.index)\n for col in data_frame.columns:\n data = pandas.get_dummies(data_frame[col], prefix=col.encode('ascii', 'replace'))\n all_columns = pandas.concat([all_columns, data], axis=1)\n return all_columns",
"def float_to_binary(x, n=64):\n return _fix_sign(int_to_binary(float_to_int(x, n), n))",
"def convert_df_to_features(df, volume=False):\n if volume:\n return df.high.astype(float), df.low.astype(float), df.close.astype(float), df.volume.astype(float)\n else:\n return df.high.astype(float), df.low.astype(float), df.close.astype(float)",
"def to_numeric_and_downcast_data(df: pd.DataFrame):\n fcols = df.select_dtypes('float').columns\n \n icols = df.select_dtypes('integer').columns\n\n df[fcols] = df[fcols].apply(pd.to_numeric, downcast='float')\n \n df[icols] = df[icols].apply(pd.to_numeric, downcast='integer')\n\n return df",
"def get_float_data(dataframe):\n dataframe = dataframe[np.isfinite(dataframe.TIME_StartTime)]\n float_cols = [isfloatarray(col) for col in dataframe.values.T]\n return (dataframe.T[float_cols].T).astype(float)",
"def isbinary(f):\n return f.dtype == bool",
"def convert_int_to_str(df):",
"def float_to_bin(value): # For testing.\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return '{:064b}'.format(d)",
"def roundRelativeBinary(df, nBits):\n type=df.dtype\n # If dtype is not floating point number or int, skip this step\n if type.kind not in ['f', 'c', 'i', 'u']:\n return df\n shiftN = 2 ** nBits\n mantissa, exp2 = np.frexp(df)\n mantissa = np.rint(mantissa * shiftN)/shiftN\n result=(mantissa * 2 ** exp2.astype(float)).astype(type)\n return result",
"def FE_transform_numeric_columns(df, bin_dict, verbose=0):\r\n df = copy.deepcopy(df)\r\n num_cols = len(bin_dict)\r\n nrows = int((num_cols/2)+0.5)\r\n if verbose:\r\n fig = plt.figure(figsize=(10,3*num_cols))\r\n for i, (col, binvalue) in enumerate(bin_dict.items()):\r\n new_col = col+'_'+binvalue\r\n if binvalue == 'log':\r\n print('Warning: Negative values in %s have been made positive before log transform!' %col)\r\n df.loc[df[col]==0,col] = 1e-15 ### make it a small number\r\n df[new_col] = np.abs(df[col].values)\r\n df[new_col] = np.log(df[new_col]).values\r\n elif binvalue == 'log10':\r\n print('Warning: Negative values in %s have been made positive before log10 transform!' %col)\r\n df.loc[df[col]==0,col] = 1e-15 ### make it a small number\r\n df[new_col] = np.abs(df[col].values)\r\n df[new_col] = np.log10(df[new_col]).values\r\n elif binvalue == 'sqrt':\r\n print('Warning: Negative values in %s have been made positive before sqrt transform!' %col)\r\n df[new_col] = np.abs(df[col].values) ### make it a small number\r\n df[new_col] = np.sqrt(df[new_col]).values\r\n elif binvalue == 'max-abs':\r\n print('Warning: Negative values in %s have been made positive before max-abs transform!' %col)\r\n col_max = max(np.abs(df[col].values))\r\n if col_max == 0:\r\n col_max = 1\r\n df[new_col] = np.abs(df[col].values)/col_max\r\n else:\r\n print('Warning: Negative values in %s have been made positive before log transform!' %col)\r\n df.loc[df[col]==0,col] = 1e-15 ### make it a small number\r\n df[new_col] = np.abs(df[col].values)\r\n df[new_col] = np.log(df[new_col]).values\r\n if verbose:\r\n ax1 = plt.subplot(nrows,2,i+1)\r\n df[col].plot.kde(ax=ax1, label=col,alpha=0.5,color='r')\r\n ax2 = ax1.twiny()\r\n df[new_col].plot.kde(ax=ax2,label=new_col,alpha=0.5,color='b')\r\n plt.legend();\r\n return df",
"def encoding_df(df, cols):\n import pandas as pd\n df = df[cols]\n obj_df = df.select_dtypes(include=['object']).copy()\n num_var = df.select_dtypes(include=['int','float']).copy()\n cat_var = pd.get_dummies(obj_df, columns = obj_df.columns)\n encoded_df = pd.concat([num_var, cat_var], axis=1, sort=False)\n return encoded_df",
"def _to_constant_df(self, num):\n if isinstance(num, pd.DataFrame):\n# pdb.set_trace()\n return num\n else:\n return self.data['ones'].copy() * num",
"def to_numerical(table, column_name):\n def replace(entry):\n return float(entry)\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)",
"def convert_binary(pred):\n pred = pred.astype(np.float64)\n pred[pred <= 0.5] = 0.0\n pred[pred > 0.5] = 1.0\n return pred",
"def numerical_encoding(dataset,\n nominal_columns='auto',\n drop_single_label=False,\n drop_fact_dict=True,\n nan_strategy=_REPLACE,\n nan_replace_value=_DEFAULT_REPLACE_VALUE):\n dataset = convert(dataset, 'dataframe')\n if nan_strategy == _REPLACE:\n dataset.fillna(nan_replace_value, inplace=True)\n elif nan_strategy == _DROP_SAMPLES:\n dataset.dropna(axis=0, inplace=True)\n elif nan_strategy == _DROP_FEATURES:\n dataset.dropna(axis=1, inplace=True)\n if nominal_columns is None:\n return dataset\n elif nominal_columns == 'all':\n nominal_columns = dataset.columns\n elif nominal_columns == 'auto':\n nominal_columns = identify_nominal_columns(dataset)\n converted_dataset = pd.DataFrame()\n binary_columns_dict = dict()\n for col in dataset.columns:\n if col not in nominal_columns:\n # Not a nominal column -> Copy original\n converted_dataset.loc[:, col] = dataset[col]\n else:\n # A nominal column -> Convert\n unique_values = pd.unique(dataset[col])\n if len(unique_values) == 1 and not drop_single_label:\n # Only one value present -> Drop\n converted_dataset.loc[:, col] = 0\n else:\n converted_dataset.loc[:, col], binary_columns_dict[col] = pd.factorize(dataset[col])\n if drop_fact_dict:\n return converted_dataset\n else:\n return converted_dataset, binary_columns_dict",
"def numerical(df):\r\n numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()\r\n return numerical_var",
"def _convert_bool(self) -> pd.Series:\n\n if self.requires_nan:\n dtype = \"float\"\n else:\n dtype = \"bool\"\n\n return self._convert(dtype=dtype)",
"def encode_data(data, columns=None, inplace=False):\n result = pd.get_dummies(data.xs, columns=columns, drop_first=True) # drop_first exist in pandas >= 0.18.1 only\n # to be done: convert C-1 all 0 data to all -1\n if inplace:\n data.xs = result\n return None\n else:\n return result",
"def itemsToFloat(self):\n returnvalue = Matrix()\n for row in self._value:\n newRow = list()\n for item in row:\n newRow.append(float(item))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def reduce_memory_footprint(df):\n for col in df.columns:\n if df[col].dtypes == 'float64':\n df[col] = df[col].astype('float32')\n elif df[col].dtypes == 'int64':\n df[col] = df[col].astype('int32')\n \n return df",
"def test_real_to_binary_single_precision(self):\n number = 3.14159265358979323846264338327950288419716939937510582097494\n result = utils.real_to_binary(number)\n expected_result = (\n '01000000010010010000111111011011'\n )\n self.assertEqual(result, expected_result)",
"def isFloat(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.float32 or col.dtype == np.float64",
"def df_float2fillna(self, df):\n df = pd.to_numeric(df, errors=\"coerce\")\n df = df.fillna(999)\n # df = df.astype(np.int64)\n df = df.replace(999, \"\")\n df = df\n return df",
"def float_series() -> pd.Series:\n series = pd.Series([(n/1000) for n in range(1001)])\n return series",
"def convert_types(df):\n \n # Iterate through each column\n for c in df:\n \n # Convert ids and booleans to integers\n if ('SK_ID' in c):\n df[c] = df[c].fillna(0).astype(np.int32)\n \n # Convert objects to category\n elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n df[c] = df[c].astype('category')\n \n # Booleans mapped to integers\n elif list(df[c].unique()) == [1, 0]:\n df[c] = df[c].astype(bool)\n \n # Float64 to float32\n elif df[c].dtype == float:\n df[c] = df[c].astype(np.float32)\n \n # Int64 to int32\n elif df[c].dtype == int:\n df[c] = df[c].astype(np.int32)\n \n return df",
"def byte_to_literal_strings(dataframe):\n # Select the str columns:\n str_df = dataframe.select_dtypes([np.object])\n\n if not str_df.empty:\n # Convert all of them into unicode strings\n str_df = str_df.stack().str.decode('utf-8').unstack()\n # Swap out converted cols with the original df cols\n for col in str_df:\n dataframe[col] = str_df[col]\n\n return dataframe",
"def float_to_bin (x):\n assert type (x) is float\n s_hex = float.hex (x)\n hex_parts = RE_FLOAT_HEX_PARTS.match (s_hex)\n assert hex_parts\n \n s = hex_parts.group ('sign')\n m = hex_parts.group ('mantissa')\n se = hex_parts.group ('signexp')\n e = hex_parts.group ('exp')\n \n # Mantissa, including sign bit\n # See also: http://stackoverflow.com/questions/1425493/convert-hex-to-binary\n s_bin = '['\n if s:\n s_bin += s\n if (int (m, 16) == 0) and (int (e) == 0):\n s_bin += \"0\"\n else:\n s_bin += \"1\"\n s_bin += \\\n \".\" \\\n + bin (int (m, 16))[2:].zfill (4 * len (m)) \\\n + \"]_{2}\"\n \n # Sign of exponent\n s_bin += \"e\" + se\n \n # Exponent\n s_bin += e\n\n return s_bin"
] |
[
"0.65553796",
"0.653674",
"0.643313",
"0.63819706",
"0.6041284",
"0.5914992",
"0.5717357",
"0.5704",
"0.56280065",
"0.56196666",
"0.559938",
"0.5566888",
"0.5526704",
"0.55197924",
"0.5503003",
"0.5501224",
"0.5498036",
"0.5497775",
"0.5450568",
"0.54475963",
"0.542765",
"0.54201597",
"0.5415367",
"0.5395697",
"0.5380637",
"0.5376671",
"0.5373933",
"0.53516984",
"0.5345352",
"0.5343103"
] |
0.70190734
|
0
|
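A runnable sketch of the row-wise conversion above, under two assumptions: `Series.iteritems()` was removed in pandas 2.0, so the loop uses the equivalent `.items()`, and the `float_to_binary` stand-in here is a `struct`-based 64-bit version rather than the document's `_fix_sign`-based helper.

```python
import struct
import pandas as pd

def float_to_binary(value):
    # Stand-in for the document's helper: the 64-bit IEEE 754 pattern via struct.
    return "{:064b}".format(struct.unpack(">Q", struct.pack(">d", float(value)))[0])

def row_to_binary(row):
    binaryrow = ''
    # pandas >= 2.0 removed Series.iteritems(); .items() is the equivalent.
    for item, val in row.items():
        binaryrow += float_to_binary(val)
    return binaryrow

df = pd.DataFrame({"a": [1.0], "b": [-2.5]})
bits = df.apply(row_to_binary, axis=1)
assert len(bits.iloc[0]) == 64 * df.shape[1]  # one 64-bit pattern per column
```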
Convert a binary string to elements of the field GF(2^m_gf).
|
def binary_string_to_gf_elements(b, m_gf):
    if m_gf == 1:
        return np.fromiter(map(int, b), int)
    else:
        assert(len(b)%m_gf == 0)
        res = np.zeros(len(b)//m_gf)
        for i in range(len(b)//m_gf):
            res[i] = int(b[i*m_gf:(i+1)*m_gf], 2)
        return res.astype(int)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def array_to_gf_array(data, m_gf = 1, floatprec = 64):\n data_binary = []\n for val in data:\n data_binary.append(binary_string_to_gf_elements(float_to_binary(val, floatprec), m_gf))\n return np.array(data_binary,dtype=int)",
"def parse_binary_field(b):\n\n\n codec, length, params = struct.unpack(\">iii\", b[:12])\n len4 = lambda b: int(len(b[12:]) / 4)\n if codec == 1: return struct.unpack(\"f\" * length, b[12:])\n elif codec == 2: return struct.unpack(\"b\" * length, b[12:])\n elif codec == 3: return struct.unpack(\">\" + \"h\" * length, b[12:])\n elif codec == 4: return struct.unpack(\">\" + \"i\" * length, b[12:])\n elif codec == 5:\n chars = struct.unpack(\"c\" * (length * 4), b[12:])\n return [b\"\".join([\n c for c in chars[i * 4: (i + 1) * 4] if c != b\"\\x00\"\n ]).decode() for i in range(length)]\n elif codec == 6:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return [chr(c) if c != 0 else \"\" for c in run_length_decode(integers)]\n elif codec == 7:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return run_length_decode(integers)\n elif codec == 8:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return delta_decode(run_length_decode(integers))\n elif codec == 9:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return [n / params for n in run_length_decode(integers)]\n elif codec == 10:\n integers = struct.unpack(\">\" + (\"h\" * int(len(b[12:]) / 2)), b[12:])\n return [n / params for n in delta_decode(recursive_decode(integers))]\n else: raise ValueError(\".mmtf error: {} is invalid codec\".format(codec))",
"def bitfield(i,N): \n bits = bitstr(i,N)\n bits = [int(digit)*2-1 for digit in bits]\n return np.array(bits).astype(float)",
"def __convertToBinaryStr(self, hex_str): \n final_bi_str = ''\n for c in hex_str:\n bi_str = bin(int(c, 16))[2:]\n if len(bi_str) != 4:\n bi_str = (4 - len(bi_str)%4)*'0' + bi_str\n final_bi_str += bi_str\n \n # sanity check\n if not(len(final_bi_str)%4 == 0 and (len(hex_str)*4) == len(final_bi_str)):\n print('Problem in hex2bi conversion')\n \n return final_bi_str",
"def binary(message: str) -> bitarray:\n binary_message = bitarray()\n byte_message = bytes(message, encoding=\"ascii\")\n binary_message.frombytes(byte_message)\n return binary_message",
"def blob2image(s):\n d0 = ord(s[0])\n d1 = ord(s[1])\n assert len(s)==d0*d1+2,(len(s),d0,d1)\n return numpy.frombuffer(s[2:],dtype='B').reshape(d0,d1)",
"def from_graph6_bytes(string):\n def bits():\n \"\"\"Returns sequence of individual bits from 6-bit-per-value\n list of data values.\"\"\"\n for d in data:\n for i in [5, 4, 3, 2, 1, 0]:\n yield (d >> i) & 1\n\n if string.startswith(b'>>graph6<<'):\n string = string[10:]\n\n if sys.version_info < (3, ):\n data = [ord(c) - 63 for c in string]\n else:\n data = [c - 63 for c in string]\n if any(c > 63 for c in data):\n raise ValueError('each input character must be in range(63, 127)')\n\n n, data = data_to_n(data)\n nd = (n * (n - 1) // 2 + 5) // 6\n if len(data) != nd:\n raise NetworkXError(\n 'Expected %d bits but got %d in graph6' % (n * (n - 1) // 2, len(data) * 6))\n\n G = nx.Graph()\n G.add_nodes_from(range(n))\n for (i, j), b in zip([(i, j) for j in range(1, n) for i in range(j)], bits()):\n if b:\n G.add_edge(i, j)\n\n return G",
"def str2bf(string):\n\n result = \"\"\n for char in string:\n result += char2bf(char)\n\n return result",
"def string_to_bit_array(text_string: str) -> list:\n\n array = list()\n for char in text_string:\n # Get the char value on one byte\n bin_val = Des.bin_value(char, 8)\n # Add the bits to the final list\n array.extend([int(x) for x in list(bin_val)])\n return array",
"def string_to_bit_array(text):\n array = list()\n for char in text:\n bin_val = bin_value(char, 8) # Get value of char in one byte\n array.extend([int(x) for x in list(bin_val)]) # Add the bits to the list\n return array",
"def string_to_bigram (self, str):\n str = 'b' + str + 'e'\n\n bigrams = []\n for i in range(0, len(str)-1):\n bg = str[i: i+2]\n bigrams.append(bg)\n\n return bigrams",
"def string2bits(s=''):\n return [bin(ord(x))[2:].zfill(8) for x in s]",
"def get_string_binary(string):\r\n string_binary_array = []\r\n\r\n # Create array of binaries from the string\r\n for character in string:\r\n string_binary_array.append(get_binary(character))\r\n\r\n # Combine those binaries into one long binary\r\n string_binary = \"\".join(string_binary_array)\r\n\r\n return string_binary",
"def str_to_bin(string):\n ret = list(string)\n # convert to binary representation\n ret = ['{:07b}'.format(ord(x)) for x in ret]\n # split the binary into\n ret = [[bit for bit in x] for x in ret]\n # flatten it and convert to integers\n ret = [int(bit) for sublist in ret for bit in sublist]\n return ret",
"def _str_to_binary_string(string: str) -> str:\n binary_string = \"\"\n for char in string:\n ascii_code = ord(char)\n binary_string += format(ascii_code, \"08b\")\n\n if binary_string:\n return binary_string\n else:\n raise ValueError(\"Error converting message to binary\")",
"def _string_to_bitlist(self, data):\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n # bit-wise operation\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n return result",
"def string_bits(myStr):\n\n other = myStr[::2] \n \n return other",
"def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 56\n (_x.s_x, _x.s_y, _x.f_x, _x.f_y, _x.step_size, _x.bias_param, _x.max_iteration,) = _struct_7q.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def _unpackb2(s, **options):\n if not isinstance(s, (str, bytearray)):\n raise TypeError(\"packed data must be type 'str' or 'bytearray'\")\n return _unpack(io.BytesIO(s), options)",
"def deser_string(f):\n nit = struct.unpack(\"<B\", f.read(1))[0]\n if nit == 253:\n nit = struct.unpack(\"<H\", f.read(2))[0]\n elif nit == 254:\n nit = struct.unpack(\"<I\", f.read(4))[0]\n elif nit == 255:\n nit = struct.unpack(\"<Q\", f.read(8))[0]\n return f.read(nit)",
"def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 16\n (_x.FL_vel, _x.FR_vel, _x.BL_vel, _x.BR_vel,) = _struct_4i.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def test_from_binary_bits_style(self):\n self.assertResult('[0001]', b4('[0001]'))",
"def s2b (s):\n return s.encode()",
"def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 16\n (_x.FL_vel, _x.FR_vel, _x.BL_vel, _x.BR_vel,) = _struct_4i.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def tobits(s):\n # Get the length of the input string\n length = len(s)\n # Create an empty list\n outputbits = [0] * length\n # Run the for loop\n for i in range(0, length):\n # Create an empty string\n stchar = ''\n # Run the loop for each character\n for char in s[i]:\n # Convert each character to bit\n stchar = stchar + format(ord(char), 'b')\n # Get the output 8 bits\n outputbits[i] = stchar.zfill(8)\n # Join everything and make it a multiple of 8 bits\n outputbits = ''.join(outputbits)\n # Return the output bits\n return outputbits",
"def str2vec(_str):\n vec = np.zeros(4 * 43)\n for i, ch in enumerate(_str):\n offset = i*43 + (ord(ch)-ord('0'))\n vec[offset] = 1\n return vec",
"def unpack_bitstr(rev_cur_bit, bitstr):\r\n bstr_len = len(bitstr)\r\n return (\r\n ''.join([rev_cur_bit[bitstr[i:i + 2]] for i in range(0, bstr_len, 2)])\r\n )",
"def instance2fv(self, text):\n if isinstance(text, unicode):\n text = text.encode('utf8')\n\n arr = np.zeros((self.n_feats,), dtype='uint32')\n\n # Convert the text to a sequence of ascii values\n ords = map(ord, text)\n\n # Count the number of times we enter each state\n state = 0\n statecount = defaultdict(int)\n for letter in ords:\n state = self.tk_nextmove[(state << 8) + letter]\n statecount[state] += 1\n\n # Update all the productions corresponding to the state\n for state in statecount:\n for index in self.tk_output.get(state, []):\n arr[index] += statecount[state]\n\n # The returned vector is the TFxIDF vector. The IDF for the\n # linguini system is actually the inv-lang-freq, and this is\n # pre-computed from the training data. We also normalize to len 1\n # at this stage.\n retval = arr * self.ilf\n return retval",
"def bytes2matrix(text):\n return [list(text[i:i+4]) for i in range(0, len(text), 4)]",
"def single_string_to_actg(bin_str: str) -> str:\r\n y = \"\"\r\n i = 1\r\n while (1):\r\n if i >= len(bin_str):\r\n break\r\n if bin_str[i - 1] == '0' and bin_str[i] == '0':\r\n y += \"A\"\r\n if bin_str[i - 1] == '0' and bin_str[i] == '1':\r\n y += \"C\"\r\n if bin_str[i - 1] == '1' and bin_str[i] == '0':\r\n y += \"G\"\r\n if bin_str[i - 1] == '1' and bin_str[i] == '1':\r\n y += \"T\"\r\n i = i + 2\r\n return y"
] |
[
"0.61993897",
"0.5916638",
"0.55031425",
"0.54492235",
"0.5442366",
"0.5400809",
"0.539698",
"0.5385185",
"0.5367709",
"0.52807707",
"0.52589214",
"0.52526057",
"0.52484083",
"0.52085364",
"0.5184683",
"0.5174587",
"0.51419324",
"0.50609285",
"0.4980555",
"0.4975593",
"0.49551478",
"0.49413997",
"0.49343735",
"0.49197587",
"0.49146125",
"0.49064872",
"0.48913926",
"0.48802176",
"0.488017",
"0.48764226"
] |
0.7760681
|
0
|
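The conversion above simply slices the bit string into `m_gf`-bit chunks and reads each chunk as an integer in `[0, 2**m_gf)`, i.e. one element of GF(2^m_gf). A sketch covering the two cases the code distinguishes:

```python
import numpy as np

def binary_string_to_gf_elements(b, m_gf):
    if m_gf == 1:
        return np.fromiter(map(int, b), int)
    else:
        assert len(b) % m_gf == 0
        res = np.zeros(len(b) // m_gf)
        for i in range(len(b) // m_gf):
            # Each m_gf-bit chunk is read as one element of GF(2^m_gf).
            res[i] = int(b[i * m_gf:(i + 1) * m_gf], 2)
        return res.astype(int)

# m_gf = 1: one GF(2) element per bit.
assert binary_string_to_gf_elements("101", 1).tolist() == [1, 0, 1]
# m_gf = 4: "1011" -> 11, "0001" -> 1.
assert binary_string_to_gf_elements("10110001", 4).tolist() == [11, 1]
```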
Convert an array of floats to their representation in GF(2^m_gf).
|
def array_to_gf_array(data, m_gf = 1, floatprec = 64):
    data_binary = []
    for val in data:
        data_binary.append(binary_string_to_gf_elements(float_to_binary(val, floatprec), m_gf))
    return np.array(data_binary,dtype=int)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_float(array):\n finial_array = []\n\n for number in array:\n finial_array.append(float(number))\n return finial_array",
"def convertToFloatArray(booleanArray: typing.List[bool]) -> typing.List[float]:\n ...",
"def floatArrayToPrt(float_array):\n\n util = OpenMaya.MScriptUtil() \n util.createFromList(float_array, len(float_array))\n\n return util.asFloatPtr()",
"def binary_string_to_gf_elements(b, m_gf):\n if m_gf == 1:\n return np.fromiter(map(int, b), int)\n else:\n assert(len(b)%m_gf == 0)\n res = np.zeros(len(b)//m_gf)\n for i in range(len(b)//m_gf):\n res[i] = int(b[i*m_gf:(i+1)*m_gf], 2)\n return res.astype(int)",
"def float_array_string(arr: Iterable[float]) -> str:\n return \"[\" + \", \".join([\"{:.4f}\".format(el) for el in arr]) + \"]\"",
"def solution(array):\n array1 = np.array(array)\n array2 = array1.astype(float)\n return array2",
"def floatArrayToString(fvalues, prec=3, delem=\",\"):\n\tsvalues = list(map(lambda v : formatFloat(prec, v), fvalues))\n\tdelem = \" \" if delem is None else delem\n\treturn delem.join(svalues)",
"def show_gf(self, x):\n g = np.zeros((len(x[0]), self._num_fu), dtype=np.float64)\n for j in range(self._num_fu):\n x1 = self._gf[j*5]\n x2 = self._gf[j*5+1]\n x3 = self._gf[j*5+2]\n w = self._gf[j*5+3]\n a = self._gf[j*5+4]\n r1 = pow((x[0]-x1), 2)+pow((x[1]-x2), 2)+pow((x[2]-x3), 2)\n g[:, j] = a*np.exp(-r1/abs(w))\n\n return g",
"def _fs (v):\r\n try : \r\n v = float(v)\r\n except : \r\n v = tuple([float (ss) for ss in \r\n v.replace('(', '').replace(')', '').split(',')])\r\n return v",
"def changeArray(array):\r\n\r\n return [[float(array[j][i]) for j in range(len(array))] for i in range(len(array[0]))]",
"def new_float(*args, **kwargs):\n return array.array(FLOAT_TYPECODE, *args, **kwargs)",
"def toFloatList(values):\n\treturn list(map(lambda va: float(va), values))",
"def bitfield(i,N): \n bits = bitstr(i,N)\n bits = [int(digit)*2-1 for digit in bits]\n return np.array(bits).astype(float)",
"def _calculate_float(byte_array):\n\tif len(byte_array) != 4:\n\t\treturn None\n\n\t'''\n\tmsg_prefix = \"[_calculate_float] \"\n\tprint(f\"{msg_prefix}byte_array = {[hex(b) for b in byte_array]}\")\n\t\n\t# if OPC_BIT_ORDER == MB_BIT_ORDER:\n\tpack_fstr = '4B'\n\tprint(f\" --> Using '{pack_fstr}' as pack_str: f = {round(struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0], 5)}\")\n\t# else:\n\t# \tif OPC_BIT_ORDER == LSBFIRST: ## Little endian\n\tpack_fstr = '<4B'\n\tprint(f\" --> Using '{pack_fstr}' as pack_str: f = {round(struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0], 5)}\")\n\t\t# else: \t## Big endian\n\tpack_fstr = '>4B'\n\tprint(f\" --> Using '{pack_fstr}' as pack_str: f = {round(struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0], 5)}\")\n\t'''\n\n\tf = struct.unpack('f', struct.pack('4B', *byte_array))[0]\n\t# f = struct.unpack('f', struct.pack(pack_fstr, *byte_array))[0]\n\treturn round(f, 5)",
"def getFloatArray2D(self) -> typing.List[typing.List[float]]:\n ...",
"def itemsToFloat(self):\n returnvalue = Matrix()\n for row in self._value:\n newRow = list()\n for item in row:\n newRow.append(float(item))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def _shorts2float(lo_byte_pair, hi_byte_pair):\n\tba = bytearray(struct.pack(\"HH\", lo_byte_pair, hi_byte_pair))\n\t[f] = struct.unpack('f', ba)\n\treturn f",
"def c_to_f(temp):\n if type(temp) is list or type(temp) is tuple:\n return [c * 1.8 + 32 for c in temp]\n else:\n return temp * 1.8 + 32.0",
"def c_to_f(temp):\n if type(temp) is list or type(temp) is tuple:\n return [c * 1.8 + 32 for c in temp]\n else:\n return temp * 1.8 + 32.0",
"def vec_to_pmf(a):\n s = jnp.sum(a) * 1.\n return a / s",
"def write_float_array(f, path, values, dtype='f8'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()",
"def get_list_of_float2(self):\n pass",
"def numarray(a: list) -> list[float]:\n return [float(aa) for aa in a]",
"def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")",
"def uifft2(inarray):\n return uifftn(inarray, 2)",
"def floatX(arr):\n return np.asarray(arr, dtype=theano.config.floatX)",
"def f2b(self, fres, f):\n return f / fres",
"def _preprocess_float(values: Sequence) -> Tuple[Union[float, NullValue]]:\n\n processed = [float(x)\n if isinstance(x, numbers.Number)\n else x\n for x in values]\n\n return tuple(processed)",
"def array_to_grader(array, epsilon=1e-4):\n res = []\n for element in array:\n if isinstance(element, int):\n res.append(\"[{0}, {0}]\".format(element))\n else:\n res.append(\"({0}, {1})\".format(element - epsilon, element + epsilon))\n return \" \".join(res)",
"def generate_array_floats(n: int = 1024, random_seed: int = None) -> TYPE_ARRAY:\n return _RNG.rand(n).astype(numpy.float64)"
] |
[
"0.6489259",
"0.6212854",
"0.59715474",
"0.57902944",
"0.57456523",
"0.56836855",
"0.5678308",
"0.5571374",
"0.5543376",
"0.55162495",
"0.5508774",
"0.54554605",
"0.54518443",
"0.5439809",
"0.5423405",
"0.5413057",
"0.5378946",
"0.5377641",
"0.5377641",
"0.53730404",
"0.53656036",
"0.5306592",
"0.526806",
"0.5263613",
"0.5262701",
"0.5246294",
"0.5214034",
"0.5170508",
"0.5162179",
"0.51366425"
] |
0.7589889
|
0
|
Evaluates the chwirut objective function at a given set of points in ``H["x"]``. If ``"obj_component"`` is a field in ``sim_specs["out"]``, only that component of the objective will be evaluated. Otherwise, all 214 components are evaluated and returned in the ``"fvec"`` field.
|
def chwirut_eval(H, _, sim_specs):
    # Relies on numpy (as np) and a module-level EvaluateFunction for the Chwirut residuals.
    batch = len(H["x"])
    O = np.zeros(batch, dtype=sim_specs["out"])
    for i, x in enumerate(H["x"]):
        if "obj_component" in H.dtype.names:
            # Optionally return NaN for this component to mimic a failed evaluation.
            if (
                "user" in sim_specs
                and "component_nan_frequency" in sim_specs["user"]
                and np.random.uniform(0, 1) < sim_specs["user"]["component_nan_frequency"]
            ):
                O["f_i"][i] = np.nan
            else:
                # Evaluate only the requested component of the objective.
                O["f_i"][i] = EvaluateFunction(x, H["obj_component"][i])
        else:
            # Evaluate all components, then combine them into the scalar objective "f".
            O["fvec"][i] = EvaluateFunction(x)
            O["f"][i] = sim_specs["user"]["combine_component_func"](O["fvec"][i])
    return O
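
chwirut_eval depends on a module-level EvaluateFunction that is not part of this snippet (a variant appears among the candidate snippets below). A self-contained sketch, with placeholder arrays standing in for the 214 NIST Chwirut observations, might look like this:

import numpy as np

# Placeholder data standing in for the 214 NIST Chwirut observations (t, y);
# real usage would load the actual dataset values instead.
t = np.linspace(0.5, 6.0, 214)
y = np.exp(-0.15 * t) / (0.005 + 0.01 * t)

def EvaluateFunction(x, component=np.nan):
    # Residuals of the Chwirut model: r_i = y_i - exp(-x0 * t_i) / (x1 + x2 * t_i).
    if np.isnan(component):
        return y - np.exp(-x[0] * t) / (x[1] + x[2] * t)
    i = int(component)
    return y[i] - np.exp(-x[0] * t[i]) / (x[1] + x[2] * t[i])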
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def evaluate_obj(self, hparams):\n\n return [self.id, hparams, self.objective(hparams, self.device)]",
"def clhess(obj, exe, arg, delta=DELTA):\n f, x = get_method_and_copy_of_attribute(obj, exe, arg)\n def hess_f(*args, **kwargs):\n hess_val = numpy.zeros(x.shape + x.shape)\n it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])\n for xi in it:\n i = it.multi_index\n jt = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])\n for xj in jt:\n j = jt.multi_index\n xi += delta/2\n xj += delta/2\n fpp = f(*args, **kwargs)\n xj -= delta\n fpm = f(*args, **kwargs)\n xi -= delta\n fmm = f(*args, **kwargs)\n xj += delta\n fmp = f(*args, **kwargs)\n xi += delta/2\n xj -= delta/2\n hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2\n return hess_val\n return hess_f",
"def optimize_force_field_parameters_Cv_FWHM(cgmodel, file_list, temperature_list, param_bounds_dict,\n frame_begin=0, frame_end=-1, sample_spacing=1, sparsify_stride=1, output_data='output.nc',\n verbose=False, n_cpu=12, min_eff_samples=50,\n n_trial_boot=200, num_intermediate_states=0, plotfile='optimize_FWHM_iterations.pdf',\n min_method='TNC'):\n\n # Parse the force field parameter change dict:\n x0 = []\n param_names = []\n bounds = []\n units = []\n \n for key,value in param_bounds_dict.items():\n # value should be [(bound_lo, bound_hi)]\n # key should be a valid force field parameter name\n param_names.append(key)\n # Every parameter except periodicity should have units\n # For now, changing periodicity is not supported.\n \n # TODO: add support for sums of periodic torsion terms\n units.append(value[0].unit)\n bounds.append((value[0].value_in_unit(units[-1]),value[1].value_in_unit(units[-1])))\n # Use mean value as starting guess:\n x0.append((value[1].value_in_unit(units[-1])+value[0].value_in_unit(units[-1]))/2)\n\n if verbose:\n print(f'param_names: {param_names}')\n print(f'unit: {units}')\n print(f'bounds: {bounds}')\n print(f'x0: {x0}')\n\n def get_reeval_FWHM(param_values, cgmodel, file_list, temperature_list, output_data,\n param_names, units, frame_begin, sample_spacing, sparsify_stride, frame_end,\n n_cpu, n_trial_boot, num_intermediate_states):\n \"\"\"\n Objective function to be minimized\n \"\"\"\n\n # Construct dictionary of parameter update instructions:\n param_dict = {}\n \n # if len(param_names) == 1:\n # # 1D optimization:\n # param_dict[param_names[0]] = param_values * units[0]\n\n for i in range(len(param_names)):\n param_dict[param_names[i]] = param_values[i] * units[i]\n \n if verbose:\n print(f'Current parameters: {param_dict}') \n \n # Re-evaluate energy with current force field parameters:\n # For bootstrapping, evaluate all frames between [frame_begin:sparsify_stride:frame_end], and\n # apply the sample_spacing only to the heat capacity part\n U_eval, simulation = eval_energy(\n cgmodel,\n file_list,\n temperature_list,\n param_dict,\n frame_begin=frame_begin,\n frame_stride=sparsify_stride,\n frame_end=frame_end,\n n_cpu=n_cpu,\n verbose=verbose,\n )\n\n # Evaluate heat capacity and full-width half-maximum from bootstrapping:\n (new_temperature_list, C_v_values, C_v_uncertainty,\n Tm_value, Tm_uncertainty,\n Cv_height_value, Cv_height_uncertainty,\n FWHM_value, FWHM_uncertainty,\n N_eff_values) = bootstrap_heat_capacity(\n U_kln=U_eval,\n output_data=output_data,\n frame_begin=frame_begin,\n frame_end=frame_end,\n sample_spacing=sample_spacing,\n sparsify_stride=sparsify_stride,\n num_intermediate_states=num_intermediate_states,\n n_trial_boot=n_trial_boot,\n plot_file=f'heat_capacity_boot_{param_names[0]}_{param_values}.pdf',\n )\n \n if verbose:\n print(f'Current FWHM: {FWHM_value} +/- {FWHM_uncertainty[0]}')\n print(f'Current minimum N_eff: {np.min(N_eff_values)}')\n \n # Check for minimum N_eff criteria.\n # If too small, the minimization should stop if we're using a gradient method.\n # If we're not using a gradient method, return a large value.\n \n if np.min(N_eff_values) < min_eff_samples:\n print(f'Insufficient number of effective samples ({np.min(N_eff_values)})')\n \n # print(f'Creating a cgmodel with current parameters...,end='')\n # Create the cgmodel\n # print('done')\n \n exit()\n \n return FWHM_value.value_in_unit(unit.kelvin)\n\n # Run optimization:\n\n # if len(param_names) == 1:\n # # Do scalar optimization:\n # opt_results = 
minimize_scalar(get_reeval_FWHM, x0,\n # args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n # frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n # method='bounded',\n # bounds=[bounds[0][0],bounds[0][1]],\n # options={'maxiter': 25},\n # )\n\n # else:\n # Do multivariate optimization:\n opt_results = minimize(get_reeval_FWHM, x0, jac='2-point',\n args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n method=min_method,\n bounds=bounds,\n options={'maxfun': 25, 'finite_diff_rel_step': 0.005, 'eta': 0.5}, # This should be user input\n ) \n \n # TODO: plot the heat capacity curves at each iteration, and make a plot of all FWHM_values \n\n # Construct dictionary of optimal parameters:\n opt_param_dict = {} \n \n k = 0\n for key,value in param_bounds_dict.items():\n opt_param_dict[key] = opt_results.x[k] * units[k]\n k += 1\n \n return opt_param_dict, opt_results",
"def EvaluateFunction(x, component=np.nan):\n if np.isnan(component):\n f = np.zeros(NOBSERVATIONS)\n for i in range(NOBSERVATIONS):\n f[i] = y[i] - np.exp(-x[0] * t[i]) / (x[1] + x[2] * t[i])\n else:\n i = component\n f = y[i] - np.exp(-x[0] * t[i]) / (x[1] + x[2] * t[i])\n\n return f",
"def core_func(phi_by_term, out_by_phase, solver, cast_mode='', **kwargs):\n\n kernels_vec = dict()\n _phi_by_term = _cast_complex2real(phi_by_term, cast_mode)\n _out_by_phase = out_by_phase.copy()\n\n for n in range(N, 0, -1):\n current_phi = _phi_by_term[(n, 0)]\n current_phase_sig = _complex2real(_out_by_phase[n],\n cast_mode=cast_mode)\n\n if n == 2:\n current_phi = np.concatenate(\n (current_phi, 2 * np.real(_phi_by_term[(2, 1)])), axis=0)\n current_phase_sig = np.concatenate(\n (current_phase_sig, np.real(_out_by_phase[0])), axis=0)\n\n kernels_vec[n] = _solver(current_phi, current_phase_sig, solver)\n\n for k in range(1, 1+n//2):\n p = n - 2*k\n _out_by_phase[p] -= binomial(n, k) * \\\n np.dot(phi_by_term[(n, k)], kernels_vec[n])\n return kernels_vec",
"def _Fqt_comp(vh,q):\n r_scale = 6.45/60\n edges,count,x_lim = vh\n # make sure that vh is normalized\n count = count/np.sum(count)\n\n return np.sum(count * np.exp(1j*q*edges*r_scale))",
"def _evaluate(self,\n x, #\n out,\n *args,\n **kwargs):\n # Stage 1: Execute all refactoring operations in the sequence x\n for refactoring_operation in x.refactoring_operations:\n refactoring_operation.do_refactoring()\n\n # Stage 2: Computing quality attributes\n # Todo: Add testability and modularity objectives\n # Todo: Normalize objective values in a standard range\n o1 = Objectives.reusability\n o2 = Objectives.understandability\n o3 = Objectives.flexibility\n o4 = Objectives.functionality\n o5 = Objectives.effectiveness\n o6 = Objectives.extendability\n # o7 = testability ## Our new objective\n # o8 = modularity ## Our new objective\n\n # Stage 3: Marshal objectives into vector\n out[\"F\"] = np.array([-1 * o1, -1 * o2, -1 * o3, -1 * o4, -1 * o5, -1 * o6, ], dtype=float)",
"def refine_energy_surf(input_matrix, energies, phase_obj, comps, variables,\n energy_func, max_iterations=1):\n # If energies is None, calculate energies of input_matrix\n if energies is None:\n energies = energy_func(*input_matrix.T)\n # for debugging purposes; return input (do nothing)\n if max_iterations < 0:\n return input_matrix, energies\n # Normalize site ratios\n # Normalize by the sum of site ratios times a factor\n # related to the site fraction of vacancies\n site_ratio_normalization = np.zeros(len(input_matrix))\n for idx, sublattice in enumerate(phase_obj.constituents):\n vacancy_column = np.ones(len(input_matrix))\n if 'VA' in set(sublattice):\n var_idx = variables.index(v.SiteFraction(phase_obj.name, idx, 'VA'))\n vacancy_column -= input_matrix[:, var_idx]\n site_ratio_normalization += phase_obj.sublattices[idx] * vacancy_column\n\n comp_list = sorted(list(comps))\n try:\n comp_list.remove('VA')\n except ValueError:\n pass\n # Remove last component from the list, as it's dependent\n comp_list.pop()\n # Map input_matrix to global coordinates (mole fractions)\n global_matrix = np.zeros((len(input_matrix), len(comp_list)+1))\n for comp_idx, comp in enumerate(comp_list):\n avector = [float(cur_var.species == comp) * \\\n phase_obj.sublattices[cur_var.sublattice_index] \\\n for cur_var in variables]\n global_matrix[:, comp_idx] = np.divide(np.dot(\n input_matrix[:, :], avector), site_ratio_normalization)\n global_matrix[:, -1] = energies\n\n # If this is a stoichiometric phase, we can't calculate a hull\n # Just return all points and energies\n if len(global_matrix) < len(comp_list)+1:\n return input_matrix, energies\n # Calculate the convex hull of the energy surface in global coordinates\n hull = scipy.spatial.ConvexHull(global_matrix, qhull_options='QJ')\n # Filter for real simplices\n simplices = hull.simplices[hull.equations[:, -1] <= -1e-6]\n vertices = list(set(np.asarray(simplices).ravel()))\n del global_matrix\n # terminating condition\n if max_iterations == 0:\n return input_matrix[vertices, :], energies[vertices]\n # For the simplices on the hull, calculate the centroids in internal dof\n centroid_matrix = input_matrix[np.asarray(simplices).ravel()]\n centroid_matrix.shape = (len(simplices), len(simplices[0]),\n len(input_matrix[0]))\n centroid_matrix = np.mean(centroid_matrix, axis=1, dtype=np.float64)\n\n # Calculate energies of the centroid points\n centroid_energies = energy_func(*centroid_matrix.T)\n # Group together the old points and new points\n input_matrix = np.concatenate((input_matrix[vertices, :],\n centroid_matrix), axis=0)\n energies = np.concatenate((energies[vertices], centroid_energies),\n axis=0)\n # Save some memory since we already grouped these\n del centroid_matrix\n del centroid_energies\n\n # Call recursively for next iteration, decrementing max_iterations\n return refine_energy_surf(input_matrix, energies,\n phase_obj, comps, variables,\n energy_func, max_iterations=max_iterations-1)",
"def computerhs(self, ue1, inletf, t, a, alpha, msh, Minv, S):\n # compute all edge fluxes (loop over all edges)\n flux = self.computeF(ue1, inletf, t, a, alpha, msh)\n #print 'flux=',flux\n\n # update DG elements (could be done in parallel)\n #ne = ue1.shape[1]\n rhs = np.zeros(msh.nu)\n for el in msh.elms:\n rhs[el.idx:el.idx+el.nnod] = self.computerhs1e(ue1, el, flux, a, Minv, S)\n #print 'rhs=', rhs\n return rhs",
"def cost_function(V, W, H):\r\n cost = 0\r\n # return the coordinate matrix of some compelte data matrix\r\n modded = V.tocoo() \r\n # add each row * column set\r\n for row, col, v in zip(modded.row, modded.col, modded.data):\r\n cost += np.square(v - np.inner(W[row], H[:,col]))\r\n return cost",
"def core_func(phi_by_term, out_by_term, solver, cast_mode='', **kwargs):\n\n kernels_vec = dict()\n _phi_by_term = _cast_complex2real(phi_by_term, cast_mode)\n _out_by_term = _cast_complex2real(out_by_term, cast_mode)\n\n for n in range(1, N+1):\n k_vec = list(range(1+n//2))\n phi_n = np.concatenate([_phi_by_term[(n, k)] for k in k_vec],\n axis=0)\n out_n = np.concatenate([_out_by_term[(n, k)] for k in k_vec],\n axis=0)\n kernels_vec[n] = _solver((2**n) * phi_n, out_n, solver)\n\n return kernels_vec",
"def hf(self, x, X):\n if type(x) == list:\n x = np.array(x)\n return self.model.hf(x, X, *self.params)",
"def objective_function(params):\n\n\tenergy = 0 # Initialize the energy in 0\n\n\tqc = get_var_form(params) # Obtain a quantum circuit instance from the parameters\n\n\tfor key in pauli_weights.keys(): # Iterate over the pauli string in the Pauli weight\n\n\t\tmc, n_measures = measure_circuit_factory(key) # Obtain the measurement circuit from the Pauli string\n\t\tqc_final = qc.compose(mc) # Combine both circuits\n\n\t\t# Execute the quantum circuit to obtain the probability distribution associated with the current parameters\n\t\tt_qc = transpile(qc_final, backend)\n\t\tq_obj = assemble(t_qc, shots=NUM_SHOTS)\n\t\tcounts = backend.run(q_obj).result().get_counts(qc_final)\n\n\t\tdistribution = get_distribution(counts, n_measures) # Convert the measured counts into a probability vector\n\n\t\t# Weight each probability by the diagonal factor, them sum all of them, and later multiply by the Pauli Weight\n\t\tenergy += np.sum(distribution * generate_diagonal_factors(n_measures)) * pauli_weights[key]\n\n\tenergy_list.append(energy) # Append the new computed energy\n\n\t# Print the iteration of the VQE and the energy\n\tprint('Iteration {}, Energy: {:.4f}'.format(len(energy_list), energy))\n\n\treturn energy",
"def calculate_component(dataframe, vector_w, component_num):\n if dataframe.empty:\n raise TypeError('It is impossible to calculate eigen vector W '\n 'and component Y on the empty dataframe.')\n\n df_size = len(dataframe)\n # calculate start value y(1)\n y_val = calculate_y(dataframe.iloc[0], vector_w)\n\n # to reach the stable state of the component\n # it should be calculated 10^component_num times.\n for _ in range(10 ** component_num):\n y_vector = [y_val, ]\n for row in range(1, df_size):\n vector_w = calculate_w(dataframe.iloc[row], vector_w,\n y_vector[row - 1], df_size)\n y_val = calculate_y(dataframe.iloc[row], vector_w)\n y_vector.append(y_val)\n\n component = (y_vector, vector_w)\n return component",
"def iot_obj_func(mol_fracs, x_mix, x_pure):\n \n x_mix = np.array(x_mix)\n x_pure = np.array(x_pure)\n calc_x_mix = np.dot(mol_fracs.reshape([1, len(mol_fracs)]), x_pure)\n return ((x_mix - calc_x_mix) ** 2).sum()",
"def _evaluate(self,\n x, #\n out,\n *args,\n **kwargs):\n # Stage 1: Execute all refactoring operations in the sequence x\n for refactoring_operation in x.refactoring_operations:\n refactoring_operation.do_refactoring()\n\n # Stage 2: Computing quality attributes\n # Todo: Add testability and modularity objectives\n # Todo: Normalize objective values in a standard range\n # Todo: Reduce QMOOD metrics to one objective by averaging them\n o1 = Objectives.reusability\n o2 = Objectives.understandability\n # o1 = 1/6 * sum qmood metrics\n # o2 = testability ## Our new objective\n # o3 = modularity ## Our new objective\n\n # Stage 3: Marshal objectives into vector\n out[\"F\"] = np.array([-1 * o1, -1 * o2], dtype=float)",
"def get_Jomega(self, vNH):\n num_vecs = len(vNH)\n J = np.zeros( (num_vecs, self.num_omega) )\n for i in range(num_vecs):\n J[i] = function_to_be_written(vNH[i])\n return 'Not composed'",
"def calcHomogeneity(Tech_res, x_S, y_S, x_vert, y_vert, objective = '0dB'):\n p0 = 2*10**(-5) # atmospheric pressure\n # optional: Optimization on 3dB loss over distance doubling\n xy_dist = np.sqrt((x_vert-x_S)**2 + (y_vert-y_S)**2)\n if objective == '6dB':\n for n in range(np.shape(Tech_res.p_SPL)[0]):\n n_dist = np.sqrt((x_vert[n]-x_S)**2 + (y_vert[n]-y_S)**2)\n Tech_res.p_SPL[n,:] = 20*np.log10(p0*10**(Tech_res.p_SPL[n,:] / 20) \\\n * (n_dist) / np.amin(xy_dist))\n elif objective == '3dB':\n for n in range(np.shape(Tech_res.p_SPL)[0]):\n n_dist = np.sqrt((x_vert[n]-x_S)**2 + (y_vert[n]-y_S)**2)\n Tech_res.p_SPL[n,:] = 20*np.log10(p0*10**(Tech_res.p_SPL[n,:] / 20) \\\n * np.sqrt((n_dist) / np.amin(xy_dist)))\n \n # Calculate the quantiles\n H = np.zeros([2, np.shape(Tech_res.p_SPL)[1]])\n for n in range(np.shape(Tech_res.p_SPL)[1]):\n H[:,n] = mquantiles(Tech_res.p_SPL[:,n], [0.1, 0.9], alphap=0.5, betap=0.5)\n H = H[1,:] - H[0,:]\n H_dist_high = 20 * np.log10(np.abs(np.amax(xy_dist)/np.amin(xy_dist)))\n H_dist_high = np.linspace(H_dist_high, H_dist_high, np.shape(H)[0])\n H_dist = [H_dist_high / 2, H_dist_high]\n # Calculate values for bar chart\n A_mean = 20 * np.log10(np.sqrt(np.mean((p0 * 10**(Tech_res.p_SPL/20))**2,axis=1)) / p0)\n min_val = np.floor(np.amin(A_mean))\n max_val = np.ceil(np.amax(A_mean))\n if max_val <= min_val:\n max_val = min_val + 5\n bins_shape = int(max_val - min_val + 1)\n hist_bins = np.linspace(min_val, max_val, bins_shape)\n Tech_res.update_tech_meas(H=H, H_dist=H_dist, A_mean=A_mean, hist_bins=hist_bins)\n return",
"def eval(self, Vobj):\n if is_Vector(Vobj):\n return self.A() * Vobj + self.b()\n return Vobj.evaluated_on(self)",
"def _objfunc(self, dv_dict):\n\n fail = 0\n metadata = self.metadata\n system = self.root\n\n try:\n for name in self.indep_list:\n self.set_desvar(name, dv_dict[name])\n\n # Execute the model\n #print(\"Setting DV\")\n #print(dv_dict)\n\n self.iter_count += 1\n update_local_meta(metadata, (self.iter_count,))\n\n try:\n with self.root._dircontext:\n system.solve_nonlinear(metadata=metadata)\n\n # Let the optimizer try to handle the error\n except AnalysisError:\n fail = 1\n\n func_dict = self.get_objectives() # this returns a new OrderedDict\n func_dict.update(self.get_constraints())\n\n # Record after getting obj and constraint to assure they have\n # been gathered in MPI.\n self.recorders.record_iteration(system, metadata)\n\n # Get the double-sided constraint evaluations\n #for key, con in iteritems(self.get_2sided_constraints()):\n # func_dict[name] = np.array(con.evaluate(self.parent))\n\n except Exception as msg:\n tb = traceback.format_exc()\n\n # Exceptions seem to be swallowed by the C code, so this\n # should give the user more info than the dreaded \"segfault\"\n print(\"Exception: %s\" % str(msg))\n print(70*\"=\",tb,70*\"=\")\n fail = 1\n func_dict = {}\n\n #print(\"Functions calculated\")\n #print(func_dict)\n return func_dict, fail",
"def _F_qt(vh_comp,conn,q,wind=5):\n\n # get the van Hove data\n (vanHove,temp,dtime) = extract_vanHove_all(vh_comp,conn,wind)\n \n Fqt = [_Fqt_comp(vh,q) for vh in vanHove]\n\n return Fqt,temp,dtime",
"def Hf(self, x, X):\n if type(x) == list:\n x = np.array(x)\n return self.model.hf(x, X, *self.params)",
"def clmixhess(obj, exe, arg1, arg2, delta=DELTA):\n f, x = get_method_and_copy_of_attribute(obj, exe, arg1)\n _, y = get_method_and_copy_of_attribute(obj, exe, arg2)\n def hess_f(*args, **kwargs):\n hess_val = numpy.zeros(x.shape + y.shape)\n it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])\n for xi in it:\n i = it.multi_index\n jt = numpy.nditer(y, op_flags=['readwrite'], flags=['multi_index'])\n for yj in jt:\n j = jt.multi_index\n xi += delta/2\n yj += delta/2\n fpp = f(*args, **kwargs)\n yj -= delta\n fpm = f(*args, **kwargs)\n xi -= delta\n fmm = f(*args, **kwargs)\n yj += delta\n fmp = f(*args, **kwargs)\n xi += delta/2\n yj -= delta/2\n hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2\n return hess_val\n return hess_f",
"def evaluate_objective(x):\n\n x_points_cartesian = x_to_cartesian(x)\n hull = ConvexHull(x_points_cartesian)\n\n # Return the negative value because the optimization is a minimization\n return -hull.volume",
"def obj_func(no_cultures, times, c_meas, neighbourhood, params):\n # Could do tiling later in solve_model if faster.\n init_amounts = np.tile(params[: 3], no_cultures)\n params = params[3:]\n # Now find the amounts from simulations using the parameters.\n amounts_est = solve_model(init_amounts, times, neighbourhood, params)\n c_est = np.array([amounts_est[:, i*3] for i in range(no_cultures)]).flatten()\n err = np.sqrt(sum((c_meas - c_est)**2))\n return err",
"def objective(self, x):\n rvs = frozenset(map(frozenset, self._rvs))\n joint = self.construct_joint(x)\n joint = joint.sum(axis=self._others, keepdims=True)\n crv = joint.sum(axis=tuple(flatten(rvs)))\n\n H_crv = h(crv.ravel())\n H = h(joint.ravel()) - H_crv\n\n def I_P(part):\n margs = [ joint.sum(axis=tuple(flatten(rvs - p))) for p in part ]\n a = sum(h(marg.ravel()) - H_crv for marg in margs)\n return (a - H)/(len(part) - 1)\n\n parts = [p for p in partitions(map(frozenset, rvs)) if len(p) > 1]\n\n caekl = min(I_P(p) for p in parts)\n\n return caekl",
"def objective(data: VLEPoints, params: typing.List[float]) -> float:\n error = 0\n mixture = Mixture(\n name=\"\",\n first_component=data.components[0],\n second_component=data.components[1],\n uniquac_params=UNIQUACParameters.from_array(params),\n )\n for point in data:\n error += (get_partial_pressures(temperature=point.temperature,\n composition=point.composition,\n mixture=mixture,\n calculation_type=\"UNIQUAC\",\n )[0] - point.pressures[0]) ** 2 \\\n + (get_partial_pressures(temperature=point.temperature,\n composition=point.composition,\n mixture=mixture,\n calculation_type=\"UNIQUAC\",\n )[1] - point.pressures[1]) ** 2\n return numpy.sqrt(error / len(data))",
"def calc_C(h, x0, nu_C, W):\n M = len(nu_C)\n C = np.zeros((W, M), dtype=float)\n N = len(h)\n B = np.zeros((2 * N + 2, W))\n x = x0 * np.arange(0, N + 1, dtype=float) / N\n h_ext = np.concatenate(([1.0], h))\n rhs = np.r_[np.ones(N + 1, dtype=float), np.zeros(N + 1, dtype=float)]\n rhs[0] = rhs[0] / np.sqrt(2.0)\n rhs[N] = rhs[N] / np.sqrt(2.0)\n for m, nu_val in enumerate(nu_C):\n for r in range(W):\n k = r - (W / 2) + 1\n B[:N + 1, r] = h_ext * np.cos(2 * np.pi * (k - nu_val) * x)\n B[N + 1:, r] = h_ext * np.sin(2 * np.pi * (k - nu_val) * x)\n B[0, :] = B[0, :] / np.sqrt(2.0)\n B[N, :] = B[N, :] / np.sqrt(2.0)\n B[N + 1, :] = B[N + 1, :] / np.sqrt(2.0)\n B[2 * N + 1, :] = B[2 * N + 1, :] / np.sqrt(2.0)\n q, r = np.linalg.qr(B)\n C[:, m] = solve_triangular(r, np.dot(q.transpose(), rhs))\n # C[:,m] = np.linalg.lstsq(B, rhs)[0]\n return C",
"def objective_function(X, components, alpha=0.):\n\n lr = Ridge(fit_intercept=False, alpha=alpha)\n lr.fit(components.T, X.T)\n residuals = X - lr.coef_.dot(components)\n return np.sum(residuals ** 2) + alpha * np.sum(lr.coef_ ** 2)",
"def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. * dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension"
] |
[
"0.55313957",
"0.53080523",
"0.5189676",
"0.51875687",
"0.511367",
"0.51000154",
"0.50619596",
"0.50266045",
"0.5026222",
"0.50165236",
"0.5009043",
"0.5007817",
"0.50025773",
"0.49932408",
"0.49886763",
"0.49766004",
"0.49728853",
"0.49723494",
"0.49722487",
"0.49674708",
"0.4944687",
"0.49371725",
"0.4885442",
"0.48785514",
"0.4875507",
"0.48529378",
"0.48466647",
"0.48387456",
"0.48222092",
"0.48207843"
] |
0.70898503
|
0
|
Generates CSV given a list of summaries
|
def generate_csv(summaries, filename):
    # Write one row per summary, reporting the best (minimum) value of each tracked metric.
    def extract_best(summary, metric):
        return min(h.metrics[metric] for h in summary['history'])
    with open(filename, 'w') as f:
        header = ','.join(['ACTIVATION', 'HIDDEN SIZE', 'TRAIN LOSS', 'VAL LOSS', 'TRAIN PPX', 'VAL PPX']) + '\n'
        f.write(header)
        for summary in summaries:
            activation = summary['meta']['ACTIVATION']
            h_size = summary['meta']['NUM_HIDDEN']
            train_loss = extract_best(summary, 'train_loss')
            val_loss = extract_best(summary, 'val_loss')
            train_ppx = extract_best(summary, 'train_ppx')
            val_ppx = extract_best(summary, 'val_ppx')
            values = ['%.2f' % v for v in [h_size, train_loss, val_loss, train_ppx, val_ppx]]
            f.write(','.join([activation] + values) + '\n')
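
A minimal usage sketch follows; the history entries here are hypothetical stand-ins for whatever training-run objects the summaries actually hold, built only to match the h.metrics access above:

from types import SimpleNamespace

def fake_epoch(train_loss, val_loss):
    # Hypothetical per-epoch record exposing a .metrics dict as expected by extract_best.
    return SimpleNamespace(metrics={'train_loss': train_loss, 'val_loss': val_loss,
                                    'train_ppx': 2 ** train_loss, 'val_ppx': 2 ** val_loss})

summaries = [
    {'meta': {'ACTIVATION': 'relu', 'NUM_HIDDEN': 128},
     'history': [fake_epoch(1.9, 2.1), fake_epoch(1.4, 1.8)]},
    {'meta': {'ACTIVATION': 'tanh', 'NUM_HIDDEN': 256},
     'history': [fake_epoch(2.0, 2.2), fake_epoch(1.6, 1.9)]},
]
generate_csv(summaries, 'results.csv')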
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_csv(filename, summaries, float_format='%.02f'):\n data = [['solution', 'total time', 'ok', 'errors']]\n\n for var, s in summaries[0].stats.iteritems():\n for stat in s:\n data[0].append('%s %s' % (var, stat))\n\n for summary in summaries:\n row = [summary.solution, float_format % summary.total_time, summary.ok,\n summary.errors]\n for s in summary.stats.itervalues():\n for stat in s.itervalues():\n row.append(float_format % stat)\n data.append(row)\n\n with open(filename, 'wb') as csv_file:\n writer = csv.writer(csv_file)\n for row in data:\n writer.writerow(row)",
"def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)",
"def generate_csv(allstats, dirname):\n for type in CATEGORIES.keys():\n filename = os.path.join(dirname, f\"stats_{type}.csv.gz\")\n stats = allstats[type]\n with gzip.open(filename, 'wt') as handle:\n writer = csv.writer(handle)\n writer.writerow([\"year\", CATEGORIES[type], \"all\", \"ano\", \"ident\", \"inclass\", \"teacher\"])\n for year in sorted(stats.keys()):\n ystats = stats[year]\n for val in sorted(ystats.keys()):\n row = [year, val] + ystats[val]\n writer.writerow(row)",
"def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")",
"def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()",
"def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)",
"def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)",
"def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')",
"def write_csv(estimates: ListOfDicts, output_csv: str) -> None:\n with open(output_csv, \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=estimates[0].keys())\n writer.writeheader()\n for row in estimates:\n writer.writerow(row)\n logging.info(f\"Wrote estimates as {output_csv}\")",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def exportFoldFile(vectors, authors, fileName):\n with open(fileName, \"w\") as fFile:\n for idv, vec in enumerate(vectors):\n [fFile.write(str(val)+',') for val in vec]\n fFile.write(authors[idv] + '\\n')",
"def __create_output_csv(self, df, score_list, elapsed_list):\n df['Similar']=score_list\n df['Elapsed']=elapsed_list\n df.to_csv('Output.csv',index=False)\n return df",
"def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])",
"def _getCSVForOverall(self, statistic):\n\n rows = []\n\n final_stat = simplejson.loads(statistic.final_json)\n for name, result in final_stat.iteritems():\n rows.append([name, result])\n\n return rows",
"def generate_csv(results, keys, options):\n if results and keys:\n with open(options.output_file, mode=fd_write_options) as fd_output:\n spamwriter = csv.writer(fd_output, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\\n')\n \n if not(options.skip_header):\n spamwriter.writerow(keys)\n \n for group in results:\n output_line = []\n \n for key in keys:\n if key in group.keys():\n if \"member\" == key:\n output_line.append(\"\\n\".join(group[key].split(\" \")))\n else:\n output_line.append(group[key])\n else:\n output_line.append('')\n \n spamwriter.writerow(output_line)\n if options.newline:\n spamwriter.writerow('')\n \n fd_output.close()\n \n return None",
"def generate_summary(final_dictionary):\n otpt = open('multifind_summary.txt', 'w')\n for cat in final_dictionary:\n category_name = cat[0] + ': ' + str(len(cat[1])) + '\\n'\n otpt.write(category_name)\n for entry in cat[1]:\n otpt.write('\\t' + str(entry[0]) + '\\n')\n otpt.write('\\t\\tTotal Entries: %s\\n' % str(entry[1]))\n otpt.write('\\t\\tUnique Species: %s\\n' % str(entry[2]))\n count = 0\n for sp in entry[3]:\n if count < entry[2]-1:\n if count == 0:\n otpt.write('\\t\\tSpecies: ' + sp + ', ')\n else:\n otpt.write(sp + ', ')\n else:\n otpt.write(sp + '\\n')\n count += 1\n otpt.close()",
"def generate_average_csv(fname, fields, trait_list):\n csv = open(fname, 'w')\n csv.write(','.join(map(str, fields)) + '\\n')\n csv.write(','.join(map(str, trait_list)) + '\\n')\n csv.close()\n\n return fname",
"def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response",
"def print_csv_format(results):\n assert isinstance(results, OrderedDict), results # unordered results cannot be properly printed\n for task, res in results.items():\n # Don't print \"AP-category\" metrics since they are usually not tracked.\n important_res = [(k, v) for k, v in res.items() if \"-\" not in k]\n print(\"copypaste: Task: {}\".format(task))\n print(\"copypaste: \" + \",\".join([k[0] for k in important_res]))\n print(\"copypaste: \" + \",\".join([\"{0:.4f}\".format(k[1]) for k in important_res]))",
"def output(owners, filename):\n\n out = open(filename, 'wb')\n writer = csv.writer(out)\n writer.writerow([\n 'Property Address',\n 'License Type',\n 'House',\n 'Street',\n 'License / Folio number',\n 'Civic address',\n 'Business name 1',\n 'Business name 2',\n 'Mail address 1',\n 'Mail address 2',\n 'Total Assess',\n 'Included Assess',\n 'Ann Chg',\n 'Unit'\n ])\n\n for owner in owners:\n owner.output_to(writer)",
"def exportcsvsumdata(self, log):\r\n csvdata= None\r\n\r\n if (log):\r\n csvdata = ('%s\\t'%(log['CALLSIGN']))\r\n csvdata += ('%s\\t'%(log['OPERATORS']))\r\n csvdata += ('%s\\t'%(log['LOCATION']))\r\n csvdata += ('%d\\t'%(log['COUNT']))\r\n csvdata += ('%s\\t'%(log['NAMES']))\r\n if(log['LASTWORKED']): \r\n csvdata += ('%s/%s UTC'%(log['LASTWORKED'],\r\n log['LWTIME'])) \r\n\r\n return csvdata",
"def makeCSV(self,file_name, data, topList):\n file_name = file_name+\".csv\"\n w = csv.writer(open(file_name, \"w\"))\n w.writerow(topList)\n for key, val in data.items():\n row = list(val)\n row.insert(0,key)\n w.writerow(row)",
"def write_csv(reviewer_data, file_obj):\n writer = csv.writer(file_obj)\n writer.writerow(\n ('Reviewer', 'Reviews', '-2', '-1', '+1', '+2', '+A', '+/- %',\n 'Disagreements', 'Disagreement%'))\n for (name, r_data, d_data) in reviewer_data:\n row = (name,) + r_data + d_data\n writer.writerow(row)",
"def make_echo_csv (list_of_region_tuples):\n\n #initialize the Echo formatted output dataframe\n out = pd.DataFrame(columns= ['Source Plate Name', 'Source Plate Type', 'Source Well', 'Sample ID', 'Sample Name', \\\n 'Sample Group', 'Sample Comment', 'Destination Plate Name', 'Destination Well', 'Transfer Volume'])\n\n idx = 0 #have to use a counter because we go through multiple lists and can't return to idx=0 each time\n #there may be a list of region tuples with source wells, volumes, dest wells\n for region in list_of_region_tuples:\n #for each well location to be shot from the current region\n for well in region[2]:\n #add the dest well\n out.loc[idx, 'Destination Well'] = well\n #Add the source well and transfer volume for that region\n out.loc[idx, ['Source Well', 'Transfer Volume']] = [region[0], region[1]]\n idx += 1\n\n #Set the unchanging names for the dataframe\n out[['Source Plate Name', 'Source Plate Type', 'Destination Plate Name']] = ['Source[1]', '384PP_AQ_BP', 'Destination[1]']\n\n return out",
"def write_csv(self, key_list, word_list):\n # Write out data\n out_data = []\n # Match filtered indexes to words\n for i in key_list.index:\n subset = word_list[word_list['key'] == i]\n # Add to aggregate list\n out_data.append(subset['word'].tolist())\n # Dump list to headerless CSV\n with open(self.output, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(out_data)\n return len(out_data)",
"def export_csv(user, tasks):\n employee_name = user[0]['name']\n employee_id = user[0]['id']\n csvfile = '{}.csv'.format(employee_id)\n with open(csvfile, mode='w') as file:\n towrite = csv.writer(file, delimiter=',', quoting=csv.QUOTE_ALL)\n for task in tasks:\n towrite.writerow([employee_id, employee_name,\n task['completed'], task['title']])",
"def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])",
"def create_main_csv(data_list):\n path = \"camelot/clean/CWC_National-Register-of-Large-Dams_2019.csv\"\n if not os.path.exists(\"camelot/clean\"):\n os.makedirs(\"camelot/clean\")\n data_concat = pd.concat(data_list)\n data_concat.reset_index(drop=True).to_csv(path, index=True)",
"def _getCSVForPerField(self, statistic):\n\n rows = []\n\n chart_json = simplejson.loads(statistic.chart_json)\n description = chart_json['description'] \n header = []\n for item in description:\n header.append(item[-1].encode('utf-8'))\n rows.append(header)\n\n final_stat = simplejson.loads(statistic.final_json)\n for choice, result in final_stat.iteritems():\n row = []\n row.append(unicode(choice).encode('utf-8'))\n for item in result:\n row.append(unicode(item).encode('utf-8'))\n rows.append(row)\n\n return rows",
"def generate_report(self, output_path):\n with open(output_path, 'w', newline='', encoding=\"utf-8\") as csv_fd:\n writer = csv.writer(csv_fd, quoting=csv.QUOTE_NONNUMERIC, doublequote=False, escapechar=\"\\\\\")\n writer.writerow([\"category\", \"level\", \"description\", \"method\", \"parameter\", \"url\", \"body\"])\n writer.writerows(self._vulns)\n writer.writerows(self._anomalies)\n writer.writerows(self._additionals)"
] |
[
"0.698654",
"0.6883718",
"0.6254284",
"0.62226987",
"0.6196189",
"0.6109199",
"0.609774",
"0.6094412",
"0.607479",
"0.60741484",
"0.6022636",
"0.60143197",
"0.5970486",
"0.5954475",
"0.5951993",
"0.592181",
"0.59141076",
"0.5913574",
"0.5880284",
"0.5856723",
"0.5838795",
"0.5809704",
"0.5772941",
"0.5766276",
"0.5734142",
"0.573249",
"0.5719399",
"0.57193124",
"0.57102084",
"0.569536"
] |
0.7734259
|
0
|
You're designing the menu at a fancy restaurant and it's important to you that the courses be linked together by common ingredients. So, the first course and the second course must share an ingredient, the second and the third course must share an ingredient, etc. Write a function that tests a menu to make sure this condition is met. `courses` will be a list containing the description of each course in the menu.
|
def course_tester(courses):
    # Stub: always reports the menu as failing the shared-ingredient condition.
    return False
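
A hedged sketch of one way to satisfy the condition described above, assuming each course description is a plain string and that "sharing an ingredient" means sharing a word (both assumptions, since the stub leaves the data format unspecified):

def course_tester_sketch(courses):
    # Treat each description as a bag of lowercase words and require every
    # adjacent pair of courses to share at least one word ("ingredient").
    ingredient_sets = [set(description.lower().split()) for description in courses]
    return all(a & b for a, b in zip(ingredient_sets, ingredient_sets[1:]))

# Example: courses 1-2 are linked by "lemon" and courses 2-3 by "tart".
menu = ["seared salmon with lemon", "lemon tart", "tart apple sorbet"]
print(course_tester_sketch(menu))  # True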
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_course_available(data, course):\n for i in range(len(data['menu']['meal']['course'])):\n for key, value in data['menu']['meal']['course'][i].items():\n if key == 'name':\n if value.upper() == course.upper():\n return True\n return False",
"def testCourses(self):\n self.person.invokeFactory(type_name=\"FSDCourse\", id=\"test-course\")\n self.failUnless('test-course' in self.person.contentIds())\n self.failUnless('test-course' in [c.id for c in self.person.getCourses()])",
"def check_meal_available(data, meal):\n for key in data['menu']['meal']:\n if data['menu']['meal']['name'].upper() == meal.upper():\n if 'course' in data['menu']['meal']:\n return True\n return False\n return False",
"def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])",
"def collect_courses():\n clear_screen()\n full_courses = {}\n input(\"First, We need to build a list of every class required for your major, and their respective credit values.\")\n while True:\n clear_screen()\n print(full_courses)\n class_code = input(\"Please input course code. i.e: IT106\\n If you are finished, press q to quit\\n\")\n if class_code == 'q':\n break\n elif class_code.upper() in full_courses.keys():\n print(\"You have already input this class. Please try again\")\n continue\n class_code = class_code.upper()\n try:\n credit_hours = int(input(\"input the credit value for course: \"+class_code+\"\\n\"))\n grade = input(\"If you have already finished \" + class_code+\", please give your final letter grade. Otherwise type 0\\n\")\n status = input(\"Please give the status of this class: A-Actively Taking D-Dropped W-Withdrawn C-Completed\\n\")\n if status.upper() == 'A' or status.upper() == 'D' or status.upper() == 'W' or status.upper() == 'C': # changed this, OR can't be used after a single == like it was before\n full_courses[class_code] = [credit_hours, grade, status]\n else:\n input(\"Invalid selection\")\n continue\n except ValueError:\n input(\"Invalid entry. \")\n continue\n return full_courses",
"def course_in_courses(self, mnemo, courses):\n for course_id, course in enumerate(courses):\n if mnemo in course.values():\n # tuple is returned here, so that converting to bool with id = 0 result was True\n return course_id,\n return False",
"def get_selected_course(courses):\n num_of_courses = len(courses)\n\n c_number = None\n while True:\n c_number = int(input('Enter Course Number: '))\n\n if c_number not in range(1, num_of_courses+1):\n _print('Enter a valid number between 1 and ', num_of_courses)\n continue\n elif courses[c_number - 1].state != 'Started':\n _print('The course has not started!')\n continue\n else:\n break\n\n selected_course = courses[c_number - 1]\n return selected_course",
"def test_courseware_nav(self):\r\n # Navigate to the courseware page from the info page\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Courseware')\r\n\r\n # Check that the courseware navigation appears correctly\r\n EXPECTED_SECTIONS = {\r\n 'Test Section': ['Test Subsection'],\r\n 'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']\r\n }\r\n\r\n actual_sections = self.course_nav.sections\r\n for section, subsections in EXPECTED_SECTIONS.iteritems():\r\n self.assertIn(section, actual_sections)\r\n self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])\r\n\r\n # Navigate to a particular section\r\n self.course_nav.go_to_section('Test Section', 'Test Subsection')\r\n\r\n # Check the sequence items\r\n EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']\r\n\r\n actual_items = self.course_nav.sequence_items\r\n self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))\r\n for expected in EXPECTED_ITEMS:\r\n self.assertIn(expected, actual_items)",
"def required(choice):\n for key in MENU[choice][\"ingredients\"]:\n if resources[key] < MENU[choice][\"ingredients\"][key]:\n print(f\"Sorry there isn't enough {key}\")\n return False\n return True",
"def _has_access_course_desc(user, action, course):\r\n def can_load():\r\n \"\"\"\r\n Can this user load this course?\r\n\r\n NOTE: this is not checking whether user is actually enrolled in the course.\r\n \"\"\"\r\n # delegate to generic descriptor check to check start dates\r\n return _has_access_descriptor(user, 'load', course, course.id)\r\n\r\n def can_load_forum():\r\n \"\"\"\r\n Can this user access the forums in this course?\r\n \"\"\"\r\n return (\r\n can_load() and\r\n (\r\n CourseEnrollment.is_enrolled(user, course.id) or\r\n _has_staff_access_to_descriptor(user, course, course.id)\r\n )\r\n )\r\n\r\n def can_enroll():\r\n \"\"\"\r\n First check if restriction of enrollment by login method is enabled, both\r\n globally and by the course.\r\n If it is, then the user must pass the criterion set by the course, e.g. that ExternalAuthMap\r\n was set by 'shib:https://idp.stanford.edu/\", in addition to requirements below.\r\n Rest of requirements:\r\n Enrollment can only happen in the course enrollment period, if one exists.\r\n or\r\n\r\n (CourseEnrollmentAllowed always overrides)\r\n (staff can always enroll)\r\n \"\"\"\r\n # if using registration method to restrict (say shibboleth)\r\n if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD') and course.enrollment_domain:\r\n if user is not None and user.is_authenticated() and \\\r\n ExternalAuthMap.objects.filter(user=user, external_domain=course.enrollment_domain):\r\n debug(\"Allow: external_auth of \" + course.enrollment_domain)\r\n reg_method_ok = True\r\n else:\r\n reg_method_ok = False\r\n else:\r\n reg_method_ok = True #if not using this access check, it's always OK.\r\n\r\n now = datetime.now(UTC())\r\n start = course.enrollment_start\r\n end = course.enrollment_end\r\n\r\n if reg_method_ok and (start is None or now > start) and (end is None or now < end):\r\n # in enrollment period, so any user is allowed to enroll.\r\n debug(\"Allow: in enrollment period\")\r\n return True\r\n\r\n # if user is in CourseEnrollmentAllowed with right course key then can also enroll\r\n # (note that course.id actually points to a CourseKey)\r\n # (the filter call uses course_id= since that's the legacy database schema)\r\n # (sorry that it's confusing :( )\r\n if user is not None and user.is_authenticated() and CourseEnrollmentAllowed:\r\n if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course.id):\r\n return True\r\n\r\n # otherwise, need staff access\r\n return _has_staff_access_to_descriptor(user, course, course.id)\r\n\r\n def see_exists():\r\n \"\"\"\r\n Can see if can enroll, but also if can load it: if user enrolled in a course and now\r\n it's past the enrollment period, they should still see it.\r\n\r\n TODO (vshnayder): This means that courses with limited enrollment periods will not appear\r\n to non-staff visitors after the enrollment period is over. 
If this is not what we want, will\r\n need to change this logic.\r\n \"\"\"\r\n # VS[compat] -- this setting should go away once all courses have\r\n # properly configured enrollment_start times (if course should be\r\n # staff-only, set enrollment_start far in the future.)\r\n if settings.FEATURES.get('ACCESS_REQUIRE_STAFF_FOR_COURSE'):\r\n # if this feature is on, only allow courses that have ispublic set to be\r\n # seen by non-staff\r\n if course.ispublic:\r\n debug(\"Allow: ACCESS_REQUIRE_STAFF_FOR_COURSE and ispublic\")\r\n return True\r\n return _has_staff_access_to_descriptor(user, course, course.id)\r\n\r\n return can_enroll() or can_load()\r\n\r\n checkers = {\r\n 'load': can_load,\r\n 'load_forum': can_load_forum,\r\n 'enroll': can_enroll,\r\n 'see_exists': see_exists,\r\n 'staff': lambda: _has_staff_access_to_descriptor(user, course, course.id),\r\n 'instructor': lambda: _has_instructor_access_to_descriptor(user, course, course.id),\r\n }\r\n\r\n return _dispatch(checkers, action, user, course)",
"def test_has_course(self):\r\n check_has_course_method(\r\n XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple']),\r\n SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),\r\n locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS\r\n )",
"def test_course_filter(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n student = enrollment.student\n course_1 = CourseFactory(grade_levels=[enrollment.grade_level])\n course_2 = CourseFactory(grade_levels=[enrollment.grade_level])\n GradeFactory(student=student, graded_work__course_task__course=course_1)\n GradeFactory(student=student, graded_work__course_task__course=course_2)\n url = self.reverse(\"reports:progress\", pk=enrollment.id)\n url += f\"?course={course_1.id}\"\n\n with self.login(user):\n self.get_check_200(url)\n\n assert len(self.get_context(\"courses\")) == 1",
"def sufficient_resources(menu, drink, resources):\r\n menu['espresso']['ingredients']['milk'] = 0 # because espresso doesn't need any milk\r\n if resources['Water'] < menu[drink]['ingredients']['water']:\r\n print(\"Sorry there isn't enough water to make the drink.\")\r\n return False\r\n elif resources['Milk'] < menu[drink]['ingredients']['milk']:\r\n print(\"Sorry there isn't enough milk to make the drink.\")\r\n return False\r\n elif resources['Coffee'] < menu[drink]['ingredients']['coffee']:\r\n print(\"Sorry there isn't enough coffee to make the drink.\")\r\n return False\r\n else:\r\n return True",
"def get_items(data, requisites, formatted):\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata",
"def get_course_offering(_course):\n link = _course['link']\n logging.info(\"calling get_course_offering with url \\\"%s\\\"\", link)\n\n scanned = conn.execute('SELECT * FROM offerings WHERE link=?', (link,)).fetchone()\n if scanned is None:\n # course wasn't checked before, load it up\n driver.get(link)\n sleep(4.7)\n\n logging.info(\"click on enroll ...\")\n try:\n driver.find_elements_by_class_name(\"EnrollButton\")[0].click()\n sleep(2.2)\n\n logging.info(\"check if course is part of multiple specializations ...\")\n is_unique = True\n try:\n choose_specialization = driver.find_element_by_id(\n \"course_enroll_s12n_selection_button_button\")\n is_unique = False\n choose_specialization.click()\n except NoSuchElementException:\n logging.info(\"course is unique!\")\n if not is_unique:\n logging.info(\"course is in multiple specializations, one selected ...\")\n sleep(1.1)\n\n logging.info(\"check if course is completely free ...\")\n is_free = False\n try:\n h4s = driver.find_elements_by_tag_name(\"h4\")\n for h4 in h4s:\n if \"Full Course, No Certificate\" in h4.text:\n is_free = True\n except NoSuchElementException:\n pass\n sleep(1.3)\n\n if is_free:\n logging.info(\"course is free \\\\o/ :)\")\n fare = 0\n\n # proceed only if course is not free\n else:\n logging.info(\"check if one can audit the course ...\")\n is_auditable = False\n\n # most courses have a sublime link \"audit only\" link\n try:\n driver.find_element_by_id(\"enroll_subscribe_audit_button\")\n is_auditable = True\n except NoSuchElementException:\n pass\n\n if is_auditable:\n logging.info(\"course is auditable :)\")\n fare = 1\n\n # if course is not auditable with the link, check if there is same enroll option\n else:\n # some courses have audit option in primary-description\n is_alternatively_auditable = False\n try:\n h4s = driver.find_elements_by_tag_name(\"h4\")\n for h4 in h4s:\n if \"Audit only\" in h4.text:\n is_alternatively_auditable = True\n except NoSuchElementException:\n pass\n finally:\n if is_alternatively_auditable:\n logging.info(\"course is auditable :)\")\n fare = 1\n else:\n logging.info(\"course is pay only :(\")\n fare = 2\n sleep(1.2)\n\n conn.execute(\"\"\"INSERT INTO offerings (\n link, title, university, category, fare) VALUES (?, ?, ?, ?, ?)\"\"\",\n (link, _course['title'], _course['university'], _course['category'], fare))\n conn.commit()\n\n except ElementNotInteractableException:\n logging.info(\"there are no upcoming sessions available ...\")\n\n else:\n logging.info(\"course already scanned ...\")",
"def populate_course(self):\r\n def descend(parent, stack):\r\n xblock_type = stack.pop(0)\r\n for _ in range(2):\r\n child = ItemFactory.create(category=xblock_type, parent_location=parent.location)\r\n if stack:\r\n descend(child, stack)\r\n\r\n descend(self.course, ['chapter', 'sequential', 'vertical', 'problem'])",
"def items(self, course):\r\n pass",
"def test_course_overview_view_with_course(self):\r\n course = CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')\r\n resp = self._show_course_overview(course.id)\r\n self.assertContains(\r\n resp,\r\n '<article class=\"courseware-overview\" data-locator=\"location:MITx+999+Robot_Super_Course+course+Robot_Super_Course\" data-course-key=\"slashes:MITx+999+Robot_Super_Course\">',\r\n status_code=200,\r\n html=True\r\n )",
"def get_course_by_key_words(input):",
"def display_courses(courses):\n\n _print('You can access %d courses' % len(courses))\n for i, course in enumerate(courses, 1):\n _print('%d - [%s] - %s' % (i, course.state, course.name))",
"def get_requisites(description, type_):\n if type_ not in description: \n return '' \n\n local_not_offered = not_offered\n local_co_req = co_req\n local_no_credit_if_re = no_credit_if_re\n local_mulitple_re = mulitple_re\n local_find_match_re = find_match_re\n\n description = description.replace(' AND ', ' and ').replace(' OR ', ' or ').replace('; or', '/')\n description = description.replace('and either', ';').replace('and one of', ';')\n description = re.sub(local_no_credit_if_re, '', description)\n description = local_not_offered.split(description.rsplit('Offered:', 1)[0].split(type_, 1)[-1])[0]\n multiple = re.search(local_mulitple_re, description)\n if multiple:\n description = description.replace(multiple.group(0), f'{multiple.group(0)[:-4]};', 1)\n del multiple, local_mulitple_re, local_not_offered, local_no_credit_if_re\n if 'Prerequisite' in type_: description = local_co_req.split(description)[0] \n del local_co_req \n POI = ',POI' if 'permission' in description.lower() else '' \n new_result = []\n for course in description.split('(')[0].split(';'):\n if ', and' in course:\n new_result.append(course.replace(',', ';')) \n else:\n new_result.append(course)\n description = ';'.join(new_result)\n del new_result\n\n if 'with either' in description:\n with_either = description.split('with either')\n description = '{}&&{}'.format(with_either[0], with_either[1].replace(' or ', '/'))\n description = description.replace(' and ', '&&').replace(' or ', ',')\n\n def extract(course_option, split_char):\n elements = []\n for next_option in filter(None, course_option.split(split_char)):\n find_match(next_option, elements)\n return elements\n\n def find_match(to_match, to_append):\n match = re.search(local_find_match_re, to_match)\n if match: to_append.append(match.group(0))\n\n semi_colon = []\n for crs in filter(None, description.split(';')):\n comma = []\n for option in filter(None, crs.split(',')):\n if '/' in option and '&&' not in option:\n comma.append('/'.join(extract(option, '/')))\n elif '/' not in option and '&&' in option:\n comma.append('&&'.join(extract(option, '&&')))\n elif '/' in option and '&&' in option:\n doubleand = ('/'.join(extract(x, '/')) for x in filter(None, option.split('&&')))\n comma.append('&&'.join(doubleand))\n else:\n find_match(option, comma) \n semi_colon.append(','.join(filter(None, comma)))\n result = ';'.join(filter(None, semi_colon)).replace(' ', '') \\\n .strip(',').strip(';').strip('&').replace(';,', ';')\n result = f'{result}{POI}'\n del POI, semi_colon\n result = re.sub(r'&{3,}', '', result)\n result = ','.join(filter(None, result.split(',')))\n result = ';'.join(filter(None, result.split(';')))\n result = ','.join(dict.fromkeys(result.split(','))).replace(';&&', ';').strip('&')\n result = ';'.join(dict.fromkeys(result.split(';')))\n result = result.strip(',').strip(';').strip('&').replace(';,', ';').strip()\n filter_result = []\n for course in result.split(';'):\n if '/' in course and '&&' not in course:\n filter_result.append(course.replace('/', ','))\n elif '/' not in course and '&&' in course and ',' not in course:\n filter_result.append(course.replace('&&', ';'))\n else:\n filter_result.append(course)\n return ';'.join(filter_result)",
"def test_course_index_view_with_course(self):\r\n CourseFactory.create(display_name='Robot Super Educational Course')\r\n resp = self.client.get_html('/course/')\r\n self.assertContains(\r\n resp,\r\n '<h3 class=\"course-title\">Robot Super Educational Course</h3>',\r\n status_code=200,\r\n html=True\r\n )\r\n _test_no_locations(self, resp)",
"def test_get_course_list_with_invalid_course_location(self):\r\n request = self.factory.get('/course')\r\n request.user = self.user\r\n\r\n course_key = SlashSeparatedCourseKey('Org', 'Course', 'Run')\r\n self._create_course_with_access_groups(course_key, self.user)\r\n\r\n # get courses through iterating all courses\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 1)\r\n\r\n # get courses by reversing group name formats\r\n courses_list_by_groups = _accessible_courses_list_from_groups(request)\r\n self.assertEqual(len(courses_list_by_groups), 1)\r\n # check both course lists have same courses\r\n self.assertEqual(courses_list, courses_list_by_groups)\r\n\r\n # now delete this course and re-add user to instructor group of this course\r\n delete_course_and_groups(course_key, commit=True)\r\n\r\n CourseInstructorRole(course_key).add_users(self.user)\r\n\r\n # test that get courses through iterating all courses now returns no course\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 0)\r\n\r\n # now test that get courses by reversing group name formats gives 'ItemNotFoundError'\r\n with self.assertRaises(ItemNotFoundError):\r\n _accessible_courses_list_from_groups(request)",
"def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. \n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()",
"def add_s_courses(student_courses, resume_output):\n # creates a list to represent each line of the html code for courses section\n html_courses_section = []\n\n # opens the html section with a div tag\n html_courses_section.append(\"<div>\")\n\n # adds a courses section with header3 format to html section\n html_courses_section.append(surround_block(\"h3\", \"Courses\"))\n\n # makes a string of comma-separated (\", \") student courses\n student_courses_str = \"\" # creates a list of student courses, and assigns the first course\n for course in student_courses: # goes second course to last course... for each course\n\n # debugging\n # print(\"idl\" + course)\n\n # adds the student course to the string of courses\n student_courses_str += course\n\n # adds a comma after the course, unless it is the last course in the list\n if course is not student_courses[-1]:\n student_courses_str += \", \"\n\n # print(\"student_courses_str:\" + student_courses_str)\n\n # adds a line with str list of courses in span tag\n html_courses_section.append(surround_block(\"span\", student_courses_str))\n\n # closes the courses section\n html_courses_section.append(\"</div>\")\n # print(\"html courses section\", html_courses_section)\n\n # adds the courses section to the html code to be printed\n resume_output.extend(html_courses_section)\n # print(resume_output)\n\n # returns output code\n return resume_output",
"def get_prereqs(course_name=None):\n reset_dict()\n if course_name is None:\n course_name = input('Enter the course you would like to check: ')\n if course_name not in COURSE_DICT:\n course_name = 'This course has not been added to UCSD_courses.json\\n' \\\n 'Please update it and try again.'\n\n graph = pydot.Dot(graph_type='digraph')\n target = pydot.Node(course_name)\n graph.add_node(target)\n complete = []\n find_next_node(graph, course_name, complete)\n return graph",
"def about_course(command):\n\n response = {'code': None, 'title': None,\\\n 'prof': None, 'section1': None,\\\n 'timing1': None, 'room1' : None,\\\n 'section2': None, 'timing2': None,\\\n 'room2': None, 'description': None,\\\n 'prereq': None, 'perm': None}\n \n if re.search('ENPM611', command):\n response = {'code': 'ENPM 611', 'title': 'Software Engineering',\\\n 'prof': 'Christopher Ackermann', 'section1': '0101',\\\n 'timing1': 'Monday 4:00 - 6:40 PM', 'room1': 'JMP 2121',\\\n 'description': 'Software engineering concepts, methods, and practices important to both the theorist and the practitioner will be covered. The entire range of responsibilities expected of a software engineer are presented. The fundamental areas of requirements development, software design, programming languages, and testing are covered extensively. Sessions on supporting areas such as systems engineering, project management, and software estimation are also included.', 'prereq': 'Competency in one programming language; and must have completed an undergraduate software engineering course. Or permission of instructor.', 'perm': 'Permission of ENGR-CDL-Office of Advanced Engineering Education.'\n}\n elif re.search('ENPM613', command):\n response = {'code': 'ENPM 613', 'title': 'Software Design and Implementation', 'prof': 'Ioana Rus', 'section1': '0101', 'timing1': 'Wednesday 7:00 - 9:40 PM', 'room1': 'TBA', 'description': 'Software design concepts and practices within the field important to both the practitioner and the theorist will be covered. Architectural and detailed designs are included for batch, client/server, and real-time systems. Design considerations for structured, object-oriented, and Web-based systems are covered. Design of databases, user interfaces, forms, and reports are also included. Implementation issues that affect the design, including error handling, performance, and inter-process communication, are presented.', 'perm': 'Permission of ENGR-CDL-Office of Advanced Engineering Education.'}\n elif re.search('ENPM631', command):\n response = {'code': 'ENPM631', 'title': 'TCP/IP Networking', 'prof': 'Pedram Fard', 'section1': '0101', 'timing1': 'Tuesday 7:00 - 9:40 PM', 'room1': 'TBA' , 'description': 'Describe how IP datagram travels through the internet and are routed from the source to the destination. Introduce the two transport protocols: UDP and TCP, the proper context to use each one, and related parameters and issues. Cover some other protocols, closely related to the TCP/IP that are responsible for the seamless operation of the Internet.', 'perm': 'ENPM602; or permission of instructor. And permission of ENGR-CDL-Office of Advanced Engineering Education.'}\n elif re.search('ENPM687', command):\n response = {'code': 'ENPM687', 'title': 'Digital Forensics and Incidence Response', 'prof': 'Jonas Amoonarquah', 'section1': 'Online', 'timing1': 'NA', 'room1': 'NA', 'description': 'Students will implement a robust incident response methodology, including proper forensic handling of evidence, and cover legal aspects of national and international law regarding forensics. The bulk of the course covers evidence acquisition, preservation, analysis and reporting on multiple platforms.', 'perm':'None'}\n elif re.search('ENPM691', command):\n response = {'code': 'ENPM691', 'title': 'Hacking of C Programs and Unix Binaries', 'prof': 'Dharmalingam Ganesan', 'section1': '0101', 'timing1': 'Thursday 7:00 - 9:40 PM', 'room1': 'JMP 3201', 'description': 'Teaches the fundamentals of secure programming in C. 
An in depth discussion on various security vulnerabilities (e.g., buffer overflows) in C applications will be taught with hands-on demo of concepts during the class. Students will learn how a C program runs \"under-the-hood\". The course will teach nitty-gritty of C programs by analyzing at the assembly level. The course discusses best practices (e.g., coding standards) and design principles for secure programming so that security can be built-in during design time. In addition to assignments, students are required to present papers related to this course.', 'perm': None, 'prereq': 'ENEE150; or students who have taken courses with comparable content may contact the department.'}\n elif re.search('ENPM693', command):\n response = {'code': 'ENPM693', 'title': 'Network Security', 'prof': 'Sohraab Soltani', 'section1': '0101', 'timing1': 'Tuesday 7:00 - 9:40 PM', 'room1' : 'JMP 3201', 'description': 'Introduction to various approaches to design; specify and verify security protocols used in large systems and networks; familiarization with some current technologies. Security threats and countermeasures, communication security and basic encryption techniques, authentication protocols, data confidentiality and integrity, analysis of cryptographic protocols, and access control in large systems and networks.', 'perm': None, 'prereq': 'An operating systems and/or network protocol course or equivalent.'}\n elif re.search('ENPM694', command):\n response = {'code': 'ENPM694', 'title': 'Networks and Protocols', 'prof': 'Sohraab Soltani', 'section1': '0101', 'timing1': 'Wednesday 7:00 - 9:40 PM', 'room1': 'JMP 3201', 'description': 'Provides a deep understanding of TCP/IP protocol suit and routing in the internet. The course topics are: overview of TCP/IP, basics of IP protocol, basics of TCP protocol, Network Address Translation (NAT), Dynamic Host Configuration Protocol (DHCP), Internet Protocol Security (IPsec), Internet Control Message Protocol (ICMP), Simple Mail Transfer Protocol (SMTP), Domain Name Service (DNS), IPv6, Concepts of routing (Bellman-Ford and Dijkstra algorithms), Routing Information Protocol (RIP), Open Shortest Path First (OSPF), Interior Gateway Routing Protocol (IGRP), Enhance Gateway Routing Protocol (EIGRP), and Border Gateway Protocol (BGP).'}\n elif re.search('ENPM696', command):\n response = {'code': 'ENPM696', 'title': 'Reverse Software Engineering', 'prof': 'Allen Hazelton', 'section1': '0101', 'timing1': 'Tuesday 4:00 - 6:40 PM', 'room1': 'TBA', 'description': 'An in-depth understanding of software reverse engineering concepts and hands-on training with reverse engineering tools, including disassemblers, decompilers, and code analyzers. Students will become familiar with both low-level software and the x86 instruction set through binary reversing sessions. This course also provides insights into many subjects such as system security, source code analysis, software design, and program understanding that will be beneficial in a variety of fields.', 'prereq': 'ENPM691 and CMSC106; or permission of instructor. 
And permission of ENGR-CDL-Office of Advanced Engineering Education.'}\n elif re.search('ENPM809J', command):\n response = {'code': 'ENPM809J', 'title': 'Cloud Security', 'prof': 'Kevin Shivers', 'section1': '0101', 'timing1': 'Monday 7:00 - 9:40 PM', 'room1': 'TBA', 'section2': '0201', 'timing2': 'Thursday 4:00 - 6:40 PM', 'room2': 'TBA', 'description': 'NA'}\n elif re.search('ENPM809R', command):\n response = {'code': 'ENPM809R', 'title': 'Software Defined Networking', 'prof': 'Emre Gunduzhan', 'section1': '0101', 'timing1': 'Monday 4:00 - 6:40 PM', 'room1': 'TBA', 'description': 'NA'}\n elif re.search('ENPM809W', command):\n response = {'code': 'ENPM809W', 'title': 'Security and Software', 'prof': 'Mikael Lindvall', 'section1': '0101', 'timing1': 'Thursday 7:00 - 9:40 PM', 'room1': 'TBA', 'description': 'NA'}\n\n return response",
"def validate_new_curriculum_courses(self, curriculum_courses):\n\n for cur in curriculum_courses:\n # check to make sure its in the general courses table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Course WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"course does not exist, we must create new one or cancel\") # todo\n\n return True",
"def student_login(student):\n # print menu for student\n student.menu_message()\n student.defined_available_courses()\n try:\n choice = int(input('your choice:'))\n except ValueError:\n print('invalid input,enter a number . \\n')\n logging.exception('invalid input in student menu')\n else:\n if choice == 1:\n # check that any course defined for student or not\n if len(student.defined_available_courses()) == 0:\n print('No courses have been defined for you yet. \\n')\n logging.warning('Try see offered courses when no defined courses')\n else:\n # show available courses that defined for student field\n student.show_available_courses()\n\n\n elif choice == 2:\n if len(student.defined_available_courses()) == 0:\n print('No courses have been defined for you yet')\n logging.warning('Try take course before courses defined')\n\n else:\n # student can take courses just in 2 case:she have not submitted yet or she submit but rejected by admin\n if student.take_course_permission():\n try:\n course_code = int(input('course code:'))\n except ValueError:\n print('invalid input,Enter a number .\\n')\n logging.exception('invalid input for course code[Try take course]')\n else:\n take = student.add_course(course_code)\n # if quantity of course is complete,take returned -1\n if take == -1:\n print('course capacity is complete.')\n logging.warning('Try take full course.')\n # if course didnt choose already by student and have enough quantity,take is 1\n elif take == 1:\n print(f'Total units:{student.total_units}')\n student.show_chosen_courses()\n logging.info('Student added a new course')\n # if student has already chosen the course\n elif take == 0:\n print('This course already has been chosen by you .')\n logging.warning('Try take existing course again ')\n elif take == 2:\n print('Course unavailable or not defined for you!')\n logging.warning('Try take unavailable course for student')\n else:\n print('You can not take courses at this time')\n logging.error('Try take course after successful submission')\n\n\n elif choice == 3:\n # student can drop courses just in 2 case:she have not submitted yet or she submit but rejected by admin\n if student.take_course_permission():\n if len(student.chosen_courses)== 0:\n print('No courses have been added yet.')\n logging.warning('Try drop course with no chosen courses')\n else:\n try:\n course_code = int(input('course code:'))\n except ValueError:\n print('invalid input,Enter a number .\\n')\n logging.exception('invalid input for course code[Try drop course]')\n else:\n if student.drop_course(course_code):\n print('Course dropped successfully.')\n logging.info('Student dropped a course')\n else:\n print('Code is not valid.')\n logging.warning('invalid course code for dropping')\n else:\n print('You can not drop courses at this time')\n logging.error('Try drop course after successful submission')\n\n elif choice == 4:\n # show student courses\n # if she submitted read them from students_info file\n if student.check_submission():\n # if courses approved by admin\n if student.check_status():\n student.show_submitted_courses()\n elif student.check_status() is False:\n if len(student.chosen_courses)!= 0:\n student.show_chosen_courses()\n else:\n print('your request has been rejected')\n logging.warning('Student courses rejected')\n # if student didnt submit show courses from chosen courses\n else:\n print(f' TOTAL UNITS : {student.total_units}')\n student.show_chosen_courses()\n elif choice == 5:\n if student.submit():\n print('submission is successfully.')\n 
logging.info('Student submitted courses')\n student.show_submitted_courses()\n else:\n print('You can\\'t submit.Your units number is low or too much. ')\n logging.warning('Unsuccessful student submission')\n\n elif choice == 6:\n student.logout()\n print('logout successfully\\n')\n logging.info('Student logged out')\n return False\n else:\n print('Unavailable option,choose another number!')\n logging.warning('Unavailable option input in student menu')\n\n return True",
"def filter_sections(courses, selected_sections):\n for c in courses:\n c_key = f\"{c.name} {c.num}\"\n\n lab_section = selected_sections[c_key][\"lab\"]\n lecture_section = selected_sections[c_key][\"lecture\"]\n tutorial_section = selected_sections[c_key][\"tutorial\"]\n\n c.labs = [s for s in c.labs if s.section == lab_section]\n c.lectures = [s for s in c.lectures if s.section == lecture_section]\n c.tutorials = [s for s in c.tutorials if s.section == tutorial_section]"
] |
[
"0.70271784",
"0.62023354",
"0.6105342",
"0.59191686",
"0.5909621",
"0.58131725",
"0.5804522",
"0.5736799",
"0.5727533",
"0.57244295",
"0.57106405",
"0.57088274",
"0.5666606",
"0.56663203",
"0.566413",
"0.559278",
"0.5579136",
"0.55358875",
"0.55228984",
"0.5516206",
"0.5506379",
"0.54989886",
"0.5477551",
"0.54670155",
"0.5463426",
"0.54573405",
"0.54527557",
"0.543939",
"0.54288334",
"0.541009"
] |
0.6984037
|
1
|
The equivalent of the normal `flag` decorator, but for TriBitMask.
|
def triflag(func: Callable[[Any], int]) -> TriBitMask:
return TriBitMask(func(None))
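A minimal self-contained sketch of how such a decorator is typically applied, with TriBitMask stubbed out as a plain value holder (the real class is assumed to be a descriptor in the surrounding library); the bit position used for `moderator` is illustrative only.

from typing import Any, Callable

class TriBitMask:  # stand-in for the real descriptor class
    def __init__(self, bit: int) -> None:
        self.bit = bit

def triflag(func: Callable[[Any], int]) -> TriBitMask:
    # Call the decorated function once (self is unused) and wrap the returned bit value.
    return TriBitMask(func(None))

class Flags:
    @triflag
    def moderator(self) -> int:
        return 1 << 3

print(Flags.moderator.bit)  # 8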
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def flags(self) -> UserFlag:",
"def _flag():\n current_flag = _flag.flag\n _flag.flag <<= 1\n return current_flag",
"def TransformFlags(self) -> _n_2_t_0[bool]:",
"def setFlag(flagbyte, pos, status):\n if status:\n return flagbyte | 2**pos\n else:\n return flagbyte & ~2**pos",
"def create_basic_flag_mask(*flags):\n\n out = np.ones(len(flags[0]), bool)\n for flag in flags:\n out &= (~flag)\n\n return out",
"def shiftr_bitmask(self):\r\n self.__bitmask__ = self.__bitmask__ >> 1",
"def getFlag(flagbyte, pos):\n mask = 2**pos\n result = flagbyte & mask\n return (result == mask)",
"def get_flag(self, flag_name):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrctrl_upt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flags_reg = self.get_register('P')\n flag_index = flags[flag_name]\n return (flags_reg >> flag_index) & 1",
"def flag():\n pass",
"def flags(cls):\n\n assert cls.__bases__ == (object,)\n\n d = dict(cls.__dict__)\n new_type = type(cls.__name__, (int,), d)\n new_type.__module__ = cls.__module__\n\n map_ = {}\n for key, value in iteritems(d):\n if key.upper() == key and isinstance(value, integer_types):\n value_instance = new_type(value)\n setattr(new_type, key, value_instance)\n map_[value] = key\n\n def str_(self):\n value = int(self)\n matches = []\n for k, v in map_.items():\n if value & k:\n matches.append(\"%s.%s\" % (type(self).__name__, v))\n value &= ~k\n if value != 0 or not matches:\n matches.append(text_type(value))\n\n return \" | \".join(matches)\n\n def repr_(self):\n return \"<%s: %d>\" % (str(self), int(self))\n\n setattr(new_type, \"__repr__\", repr_)\n setattr(new_type, \"__str__\", str_)\n\n return new_type",
"def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n",
"def mask(self):",
"def bitmask(*args: Union[int, Sequence[int], Tuple[int, int]]) -> int:\n mask = 0\n\n for a in args:\n if isinstance(a, tuple):\n hi, lo = a\n mask |= ((1 << (hi - lo + 1)) - 1) << lo\n elif isinstance(a, (list, set)):\n mask |= reduce(operator.or_, ((1 << b) for b in a))\n elif isinstance(a, int):\n mask |= 1 << a\n\n return mask",
"def set_bitmask(self, value):\r\n self.__bitmask__ = value | 0xFF00",
"def flags_decomposer(flags):\n l = 0\n \n if flags & 2 ** 1:\n l = 1\n \n if flags & 2 ** 4:\n l = 2\n \n return l",
"def flags(self) -> undefined.UndefinedOr[UserFlag]:",
"def take_action_on_flags(self, *args, **kwargs):\r\n pass",
"def flag_set(self, flag):\n if self.flags & flag != 0:\n return True\n else:\n return False",
"def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)",
"def get_bitmask(self):\r\n return self.__bitmask__",
"def getFlag(self, flag) -> bool:\n ...",
"def check_mask(f):\n def wrapper(*args, **kwargs):\n data = args[0]\n try:\n mask = data.mask\n except AttributeError:\n data = np.ma.array(data, mask=np.zeros(data.shape, dtype=np.bool))\n mask = data.mask\n args = list(args)\n args[0] = data\n args = tuple(args)\n return f(*args, **kwargs)\n return wrapper",
"def toggle_flag(self, bit):\n\n self.fl = self.fl ^ (1 << bit)",
"def BIT(self, value):\n result = self.reg.A & value\n self.reg.N = result >> 7\n self.reg.V = result >> 6 & 1\n self.reg.Z = result == 0",
"def set(self, *options: str) -> int:\n self.flags |= self.mask(*options)\n return self.flags",
"def setFlag(self, flag, value) -> None:\n ...",
"def _get_bit(self, num, bit, mask=1):\n return (int(num) >> bit) & mask",
"def get_bit(num, i):\n return num & (1 << i) != 0",
"def __int__(self):\n\n return self.bitflags",
"def set_bit(num, i):\n return num | (1 << i)"
] |
[
"0.6482749",
"0.64294463",
"0.6405516",
"0.6219988",
"0.6217365",
"0.616181",
"0.6008068",
"0.60012144",
"0.59498453",
"0.58851945",
"0.5867022",
"0.5866195",
"0.5804603",
"0.5781837",
"0.57502043",
"0.5726512",
"0.56852627",
"0.56843007",
"0.56681556",
"0.561124",
"0.55814916",
"0.5560303",
"0.5545787",
"0.5501109",
"0.54997534",
"0.5490865",
"0.5487433",
"0.54820347",
"0.5480248",
"0.5466144"
] |
0.71491706
|
0
|
Initialize a permission overwrite object from Discord data. This alternate constructor is needed because a PermissionOverwrite object is usually initialized directly by users, so the data-based path bypasses `__init__`.
|
def from_data(cls: Type[SELF], data: Dict) -> SELF:
self = cls.__new__(cls)
self.type = PermissionTarget(data['type'])
self.allow = Permissions(int(data['allow']))
self.deny = Permissions(int(data['deny']))
return self
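A runnable sketch of the same pattern with the Discord wrapper types stubbed out (PermissionTarget and Permissions here are minimal stand-ins, and the payload values are made up): `cls.__new__(cls)` builds the instance without going through the user-facing `__init__`.

from enum import Enum
from typing import Dict, Type, TypeVar

SELF = TypeVar('SELF', bound='PermissionOverwrite')

class PermissionTarget(Enum):
    role = 'role'
    member = 'member'

class Permissions(int):
    pass

class PermissionOverwrite:
    @classmethod
    def from_data(cls: Type[SELF], data: Dict) -> SELF:
        # Bypass __init__, then fill the fields straight from the payload.
        self = cls.__new__(cls)
        self.type = PermissionTarget(data['type'])
        self.allow = Permissions(int(data['allow']))
        self.deny = Permissions(int(data['deny']))
        return self

overwrite = PermissionOverwrite.from_data({'type': 'role', 'allow': '1024', 'deny': '2048'})
print(overwrite.type, int(overwrite.allow), int(overwrite.deny))  # PermissionTarget.role 1024 2048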
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def assign_perm(self, permission, user, obj, ctype=None):\n if getattr(obj, 'pk', None) is None:\n raise ObjectNotPersisted(\"Object %s needs to be persisted first\" % obj)\n\n if not ctype:\n ctype = ContentType.objects.get_for_model(obj)\n\n if not isinstance(permission, Permission):\n permission = Permission.objects.get(content_type=ctype, codename=permission)\n\n obj_perm, created = self.get_or_create(\n content_type=ctype,\n permission=permission,\n object_pk=obj.pk,\n user=user)\n return obj_perm",
"def from_guild_create(self, **data: dict) -> 'Guild':\n self.unavailable = data.get(\"unavailable\", False)\n\n if self.unavailable:\n # We can't use any of the extra data here, so don't bother.\n return self\n\n self.name = data.get(\"name\") # type: str\n self._icon_hash = data.get(\"icon\") # type: str\n self._splash_hash = data.get(\"splash\") # type: str\n self.owner_id = int(data.get(\"owner_id\", 0)) or None # type: int\n self._large = data.get(\"large\", None)\n self.features = data.get(\"features\", [])\n self.region = data.get(\"region\")\n\n afk_channel_id = data.get(\"afk_channel_id\")\n if afk_channel_id is not None:\n afk_channel_id = int(afk_channel_id)\n\n self.afk_channel_id = afk_channel_id\n self.afk_timeout = data.get(\"afk_timeout\")\n\n self.verification_level = VerificationLevel(data.get(\"verification_level\", 0))\n self.mfa_level = MFALevel(data.get(\"mfa_level\", 0))\n self.notification_level = NotificationLevel(data.get(\"default_message_notifications\"))\n self.content_filter_level = ContentFilterLevel(data.get(\"explicit_content_filter\", 0))\n\n self.member_count = data.get(\"member_count\", 0)\n\n # Create all the Role objects for the server.\n for role_data in data.get(\"roles\", []):\n role_obj = role.Role(self._bot, **role_data)\n role_obj.guild_id = self.id\n self._roles[role_obj.id] = role_obj\n\n # Create all the Member objects for the server.\n self._handle_member_chunk(data.get(\"members\", []))\n\n for presence in data.get(\"presences\", []):\n member_id = int(presence[\"user\"][\"id\"])\n member_obj = self._members.get(member_id)\n\n if not member_obj:\n continue\n\n member_obj.presence = Presence(**presence)\n\n # Create all of the channel objects.\n for channel_data in data.get(\"channels\", []):\n channel_obj = channel.Channel(self._bot, **channel_data)\n channel_obj.guild_id = self.id\n channel_obj._update_overwrites(channel_data.get(\"permission_overwrites\", []),\n guild=self)\n self._channels[channel_obj.id] = channel_obj\n\n # Create all of the voice states.\n for vs_data in data.get(\"voice_states\", []):\n user_id = int(vs_data.get(\"user_id\", 0))\n member = self.members.get(user_id)\n if not member:\n # o well\n continue\n\n voice_state = dt_vs.VoiceState(**vs_data, client=self._bot)\n\n vs_channel = self._channels.get(int(vs_data.get(\"channel_id\", 0)))\n if vs_channel is not None:\n voice_state.channel_id = vs_channel.id\n voice_state.guild_id = self.id\n\n self._voice_states[voice_state.user_id] = voice_state\n\n # Create all of the emoji objects for the server.\n self._handle_emojis(data.get(\"emojis\", []))",
"def from_data(cls,data):\n\n new_object = cls() # Only this line needs to be updated\n new_object.data = data\n\n return new_object",
"def __init__(self, id=None, permission=None, permission_type=None, resource=None, resource_type=None, grant=None, deny=None, inherit=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._permission = None\n self._permission_type = None\n self._resource = None\n self._resource_type = None\n self._grant = None\n self._deny = None\n self._inherit = None\n self.discriminator = None\n\n self.id = id\n if permission is not None:\n self.permission = permission\n self.permission_type = permission_type\n if resource is not None:\n self.resource = resource\n self.resource_type = resource_type\n self.grant = grant\n self.deny = deny\n self.inherit = inherit",
"def __init__(self, data: list):\n self.__data = copy.deepcopy(data)",
"def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])",
"def initialize(self, request, args, kwargs):\n data = request_data.RequestData(request, args, kwargs)\n mutator = access_checker.Mutator(data)\n if data.is_developer:\n check = access_checker.DeveloperAccessChecker(data)\n else:\n check = access_checker.AccessChecker(data)\n return data, check, mutator",
"def __init__(self, data={}):\n self._update_(data)",
"def __populate_permissions(self):\n if self.auth_group.value:\n grpid = self.auth_group.value\n grp = AuthGroup.objects.get(pk=grpid)\n for perm in Permission.objects.all():\n if hasattr(self, perm.codename):\n if grp.permissions.filter(pk=perm.pk).exists():\n getattr(self, perm.codename).value = True\n else:\n getattr(self, perm.codename).value = False",
"def __init__(self_, data: Union[GCPSTSDelegateAccount, UnsetType] = unset, **kwargs):\n if data is not unset:\n kwargs[\"data\"] = data\n super().__init__(kwargs)",
"def __init__(self, data: Dict[str, Any]):\n self._conflicts = {key: Conflict(value) for key, value in data.items()}\n self._newer_with_warning_conflict = Conflict({'action': 'newer'})\n self._error_conflict = Conflict({})",
"def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore example permissions\n explore_access_perm = permission.objects.get(codename=explore_example_rights.explore_example_access)\n explore_save_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_save_query)\n explore_delete_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_delete_query)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm,\n explore_save_query_perm,\n explore_delete_query_perm)\n except Exception, e:\n print('ERROR : Impossible to init the permissions : ' + e.message)",
"def __create_new_permission(self, codename, **kwargs) -> None:\n permission = Permission(codename=codename, **kwargs)\n permission.save()",
"def init() -> None:\n appbuilder.add_permissions(update_perms=True)\n security_manager.sync_role_definitions()",
"def __init__(self, game, user, *, data=None, **kwargs):\r\n self.game = game\r\n self.user = user\r\n \r\n if data:\r\n self.from_data(data)\r\n else:\r\n self.initialize(**kwargs)",
"def from_data(cls, data):\n # Validation\n if data.get(\"_Serializable_classname\") != cls.__name__:\n return None\n del data[\"_Serializable_classname\"]\n if data.get(\"_Serializable_version\") is not None:\n del data[\"_Serializable_version\"]\n\n this = cls(None)\n this.__dict__.update(data)\n return this",
"def permission(guild_id: int, permissions: list):\n\n def wrapper(cmd):\n if not getattr(cmd, \"__permissions__\", None):\n cmd.__permissions__ = {}\n cmd.__permissions__[guild_id] = permissions\n return cmd\n\n return wrapper",
"def __init__(self, undo, redo, **kwargs):\n self._undo = undo\n self._redo = redo\n self._data = kwargs.get(\"data\", {})\n self.key = kwargs.get(\"key\", None)",
"def update(self, permission, **kwargs):\n kwargs['permission'] = permission\n return self.update_instance(**kwargs)",
"def set_user(self, args):\n permissions = \"\"\n\n permissions = ' '.join(args['<permission>'])\n\n config = self.config\n config[args['<mask>']] = permissions\n self.bot.db[self.key] = config",
"def from_data(self, data):\r\n for field in (field for field in self.SAVE_FIELDS if field not in data):\r\n cid = debug_id(guild=self.game.guild, user=self.user, charname=data.get('name',None))\r\n wg.log.warning(f\"Character {cid} missing field {field}\")\r\n\r\n for field in data:\r\n # Expects secured data\r\n setattr(self, field, data[field])",
"def test_only_default_perms(self):\n p1 = Permission.objects.get(codename='eat_spam')\n # Change the codename so that clean_permissions has something to clean:\n p1.codename = 'eat_lovelyspam'\n p1.save()\n # Add a permission that isn't a default permission of 'Spam':\n ct = ContentType.objects.get_for_model(Spam)\n p2 = Permission.objects.create(\n name='Can reject spam', codename='reject_spam', content_type=ct\n )\n stream = StringIO()\n with self.patcher(new=Mock(return_value=[p1, p2])):\n utils.clean_permissions(stream)\n self.assertTrue(stream.getvalue())\n p1.refresh_from_db()\n self.assertEqual(p1.codename, 'eat_spam', msg=\"p1.codename should have been reset\")\n p2.refresh_from_db()\n self.assertEqual(p2.codename, 'reject_spam', msg=\"p2.codename should have not been altered\")",
"def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(\n name=main_rights.DEFAULT_GROUP\n )\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(\n codename=explore_keyword_rights.EXPLORE_KEYWORD_ACCESS\n )\n\n # Add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception as exception:\n logger.error(\n \"Impossible to init explore_keyword permissions: %s\"\n % str(exception)\n )",
"def apply_perm(permission_name: Optional[str], entity: UserOrGroup):\n try:\n permission = Permission.from_name(permission_name or \"none\")\n except KeyError:\n raise exceptions.ParseError(f\"Unknown permission: {permission_name}\")\n\n obj.set_permission(permission, entity)",
"def from_data(cls, data):\n guild_id = parse_id(data)\n \n self = object.__new__(cls)\n self.approximate_online_count = parse_approximate_online_count(data)\n self.approximate_user_count = parse_approximate_user_count(data)\n self.description = parse_description(data)\n self._set_discovery_splash(data)\n self.emojis = parse_emojis(data, {}, guild_id)\n self.features = parse_features(data)\n self._set_icon(data)\n self.id = guild_id\n self._set_invite_splash(data)\n self.stickers = parse_stickers(data, {})\n self.name = parse_name(data)\n return self",
"def __init__(self,\n data=None,\n files=None,\n auto_id='id_%s',\n prefix=None,\n initial=None,\n error_class=ErrorList,\n label_suffix=None,\n empty_permitted=False,\n instance=None,\n *args,\n **kwargs):\n self.access_limits = kwargs.pop('access_limits', [])\n\n super(PermissionsForm, self).__init__(data,\n files,\n auto_id,\n prefix,\n initial,\n error_class,\n label_suffix,\n empty_permitted,\n instance,\n *args,\n **kwargs)\n\n # Find which users and groups haven't been added yet.\n if instance is None:\n addable_users = get_user_model().objects.all()\n addable_groups = Group.objects.all()\n else:\n addable_users, addable_groups = instance.other_users_groups()\n self.fields[\"permissions\"].set_old_users_groups(\n instance.users_allowed.all(),\n instance.groups_allowed.all())\n\n # Limit to users and groups that can see dependencies.\n for access_limit in self.access_limits:\n addable_users, addable_groups = access_limit.intersect_permissions(\n addable_users,\n addable_groups)\n self.fields[\"permissions\"].set_users_groups_allowed(addable_users,\n addable_groups)",
"def test__ApplicationCommandPermissionOverwrite__eq():\n allow = True\n target_id = 202302210005\n target_type = ApplicationCommandPermissionOverwriteTargetType.role\n \n keyword_parameters = {\n 'allow': allow,\n 'target': (target_type, target_id),\n }\n \n application_command_permission_overwrite = ApplicationCommandPermissionOverwrite(**keyword_parameters)\n \n vampytest.assert_eq(application_command_permission_overwrite, application_command_permission_overwrite)\n vampytest.assert_ne(application_command_permission_overwrite, object())\n \n for field_name, field_value in (\n ('allow', False),\n ('target', (target_type, 202302210006)),\n ('target', (ApplicationCommandPermissionOverwriteTargetType.channel, target_id)),\n ):\n test_application_command_permission_overwrite = ApplicationCommandPermissionOverwrite(\n **{**keyword_parameters, field_name: field_value}\n )\n vampytest.assert_ne(application_command_permission_overwrite, test_application_command_permission_overwrite)",
"def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data",
"def fromBukkit(cls, data):\n type_ = None\n if data.isUpgraded():\n type_ = cls.upgradeable.get(data.getType())\n elif data.isExtended():\n type_ = cls.extendable.get(data.getType())\n else:\n type_ = cls.regular.get(data.getType())\n Preconditions.checkNotNull(type_, \"Unknown potion type from data \" + data)\n return \"minecraft:\" + type_",
"def __init__(self, data=None): # noqa: E501 # noqa: E501\n\n self._data = None\n self.discriminator = None\n\n if data is not None:\n self.data = data"
] |
[
"0.5088103",
"0.50488174",
"0.5030711",
"0.49461928",
"0.4939557",
"0.49180445",
"0.48853058",
"0.48666456",
"0.4836884",
"0.48114455",
"0.48099852",
"0.48095408",
"0.4808097",
"0.4794472",
"0.4790157",
"0.476699",
"0.4765125",
"0.47534674",
"0.47128752",
"0.4712254",
"0.47098485",
"0.46850827",
"0.4683062",
"0.4678269",
"0.46733966",
"0.4669309",
"0.46633115",
"0.46350938",
"0.46251562",
"0.46165884"
] |
0.63469905
|
0
|
Find the average score from the sentence value dictionary
|
def average_score(self, sentenceValue):
sumValues = 0
for entry in sentenceValue:
sumValues += sentenceValue[entry]
# Average value of a sentence from original summary_text
average = (sumValues / len(sentenceValue))
return average
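A quick standalone check of the same averaging logic, using made-up sentence scores:

sentenceValue = {'First sentence.': 3, 'Second sentence.': 5, 'Third one.': 4}
average = sum(sentenceValue.values()) / len(sentenceValue)
print(average)  # 4.0 == (3 + 5 + 4) / 3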
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average",
"def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average",
"def _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) ->Dict[str, Tensor]:\n results: Dict[str, Tensor] = {}\n if sentence_results == {}:\n return results\n for rouge_key, scores in sentence_results.items():\n results[rouge_key] = torch.tensor(scores).mean()\n return results",
"def sentence_to_avg(sentence, word_to_vec_map):\n # Get a valid word contained in the word_to_vec_map. \n any_word = list(word_to_vec_map.keys())[0]\n \n ### START CODE HERE ###\n # Step 1: Split sentence into list of lower case words (≈ 1 line)\n words = sentence.lower().split()\n\n # Initialize the average word vector, should have the same shape as your word vectors.\n avg = np.zeros(word_to_vec_map[any_word].shape)\n \n # Initialize count to 0\n count = 0\n \n # Step 2: average the word vectors. You can loop over the words in the list \"words\".\n for w in words:\n # Check that word exists in word_to_vec_map\n if w in word_to_vec_map:\n avg += word_to_vec_map[w]\n # Increment count\n count +=1\n \n if count > 0:\n # Get the average. But only if count > 0\n avg = avg / count\n \n ### END CODE HERE ###\n \n return avg",
"def score_sentence(sentence, score_dict):\n\n word_set = get_words(sentence)\n score = 0\n\n for unique_word in word_set:\n for word in score_dict:\n if unique_word == word:\n score += score_dict[word]\n else:\n pass\n return score",
"def get_sentence_score(sentences, word_frequencies):\r\n sentence_scores = dict()\r\n for sent in sentences:\r\n word_count_without_stopwords=0\r\n for word in word_tokenize(sent.lower()):\r\n if word in word_frequencies.keys():\r\n word_count_without_stopwords+=1 \r\n if len(sent.split(' ')) < 30:\r\n if sent not in sentence_scores.keys():\r\n sentence_scores[sent] = word_frequencies[word]\r\n else:\r\n sentence_scores[sent] += word_frequencies[word]\r\n \r\n if sent in sentence_scores:\r\n sentence_scores[sent] = sentence_scores[sent]/word_count_without_stopwords\r\n \r\n print(sentence_scores) \r\n return sentence_scores",
"def score(self, sentence):\n score = 0.0\n last_token = None\n for token in sentence:\n if not last_token:\n last_token = token\n continue\n tup = (last_token, token)\n if tup in self.counts:\n score += self.s[tup]\n else: # stupid backoff to add-one smoothed unigram\n if self.s[token]: score += self.s[token]\n else: score += math.log(1.0 * (self.counts[token] + 1) / (self.ntokens * 2))\n last_token = token\n return score",
"def score(self, sentence):\n score = 0.0\n V = len(self.f1) # vocabulary size\n for token in sentence:\n if token in self.f1: score += self.f1[token]\n else: score -= math.log10(self.total + V)\t\t # OOV \n return score",
"def score(self, sentence):\n\n score = 0.0\n i = 0\n temp = \"\"\n for token in sentence:\n count = self.unigramCounts[token]\n if (i == 0):\n i = i + 1\n temp = token\n continue\n\n key = temp + \",\" + token\n bicount = self.bigramCounts[key]\n unicount = self.unigramCounts[temp]\n temp = token\n if bicount > 0 :\n\n score += (math.log(bicount) - math.log(unicount))\n else:\n unicount = self.unigramCounts[token]\n score += math.log(unicount + 1) + math.log(0.4)\n score -= math.log(self.total + len(self.unigramCounts))\n\n return score",
"def score(self, sentence):\n score = 0.0\n prev_word = None\n for token in sentence:\n two_words_count = self.bigram_count[prev_word][token]\n prev_word_count = self.unigram_count[prev_word]\n if (two_words_count > 0):\n score += math.log(two_words_count)\n score -= math.log(prev_word_count)\n else:\n score += math.log(self.backoff_multiplier)\n score += math.log(self.unigram_count[token] + 1.0)\n score -= math.log(self.num_words + self.vocabulary_size)\n prev_word = token\n return score",
"def score_sentences(tf_idf_matrix):\r\n\r\n sentenceValue = {}\r\n\r\n for sent, f_table in tf_idf_matrix.items():\r\n \r\n total_score_per_sentence = 0\r\n\r\n count_words_in_sentence = len(f_table)\r\n for word, score in f_table.items():\r\n \r\n total_score_per_sentence += score\r\n\r\n sentenceValue[sent] = total_score_per_sentence / count_words_in_sentence\r\n\r\n return sentenceValue",
"def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score",
"def score_sentences(self, tf_idf_matrix):\r\n\r\n sentenceValue = {}\r\n\r\n for sent, f_table in tf_idf_matrix.items():\r\n total_score_per_sentence = 0\r\n\r\n count_words_in_sentence = len(f_table)\r\n for word, score in f_table.items():\r\n total_score_per_sentence += score\r\n\r\n sentenceValue[sent] = total_score_per_sentence / count_words_in_sentence\r\n\r\n return sentenceValue",
"def calculate_avg_score(state_score,state_count):\n\tfor state in state_score.keys():\n\t\tstate_score[state] = 1.*state_score[state]/state_count[state]\n\treturn state_score",
"def score(self, sentence):\n # count each incremented word\n for word in sentence:\n if word not in self.unigramCounts:\n self.zeroCount += 1\n\n # apply laplace smoothing to unigram model\n score = 0.0\n for word in sentence:\n count = self.unigramCounts[word]\n score += math.log(count + 1)\n score -= math.log(self.totalCount + self.zeroCount)\n return score",
"def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])",
"def test_get_average_of_sentiment_scores():\n\n dict_of_avg_scores = get_average_of_sentiment_scores(\n 'politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print('average sentiment scores all comments')\n for key, value in dict_of_avg_scores.items():\n print(key, value)\n print()",
"def average(entry):\n return entry['total time (s)'] / float(entry['correct answers'] + entry['wrong answers'])",
"def score(self, sentence):\n\n\n # TODO your code here\n score = 0.0 \n prevWord = \"\"\n prevPrevWord = \"\"\n newSentence = []\n for word in sentence:\n newSentence += word.split()\n for currentWord in sentence:\n currentWord = currentWord.strip(STRIP_CHARS)\n currentWord = currentWord.lower()\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n trigramCount = self.trigramCounts[trigram]\n if trigramCount > 0:\n score += math.log(max(self.trigramCounts[trigram] - DISCOUNT, 0)*len(self.trigramCounts) + DISCOUNT*self.followingCounts[(prevPrevWord, prevWord)]*self.continuationCounts[currentWord])\n # Subtraction by 1 removes the add one count from the laplace\n # smoothing\n score -= math.log((self.bigramCounts[(prevPrevWord, prevWord)]) * len(self.trigramCounts))\n elif self.bigramCounts[(prevWord, currentWord)] > 0:\n score += math.log(self.bigramCounts[(prevWord, currentWord)]*BI_BACKOFF_COEFFICIENT)\n score -= math.log(self.totalBigramCounts)\n else:\n count = self.unigramCounts[currentWord]\n score += math.log(count * UNI_BACKOFF_COEFFICIENT)\n score -= math.log(self.total)\n else:\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n return -score",
"def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score",
"def score_sentence(self, sentence):\n\t\t\n\t\t# YOUR CODE HERE",
"def avg_hw_one(students_dict):\n scores = [\n hw['Homework 1']\n for hw in students_dict.values()\n ]\n hw_average = sum(scores) / len(scores)\n return hw_average",
"def phrase_scores(self):\n phrase_scores = dict()\n word_scores = self.word_scores()\n for phrase in self.candidate_keywords():\n words = split_words(phrase)\n for word in words:\n phrase_scores[phrase] = phrase_scores.get(phrase, 0) + word_scores[word]\n return phrase_scores",
"def sentiment(self) -> Dict[str, float]:",
"def sentiment_analyzer_scores(sentence):\n score = get_sentiment_analyzer().polarity_scores(sentence)\n return 'Negative Score:', score['neg'], 'Neutral Score:', score['neu'], 'Positive Score:', score['pos'], 'Compound Score:', score['compound']",
"def getScore(self, sentence):\r\n \r\n score = 0\r\n \r\n for word in sentence.words:\r\n score += len(word)\r\n \r\n return score",
"def mean_avg_precision(top_k_results, relevance):\n map_score = 0.0\n for j, scores in relevance.items():\n precision, _ = calculate_precision_recall(top_k_results[j - 1], scores)\n relevant = set()\n for x in scores:\n relevant.add(x[0])\n \n precision_score, cnt = 0.0, 0\n for i in range(len(top_k_results[j - 1])):\n if top_k_results[j - 1][i] in relevant:\n precision_score += precision[i]\n cnt += 1\n \n map_score += precision_score if cnt == 0 else precision_score / cnt\n \n map_score /= len(relevance)\n \n return map_score",
"def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score",
"def average_scores(self, scores, education, count):\n\n for key in scores.keys():\n for k in scores[key].keys():\n scores[key][k] = round(scores[key][k] / count[key][k], 1)\n education[key][k] = round(education[key][k] / count[key][k], 1)\n\n return scores, education",
"def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)"
] |
[
"0.84489435",
"0.8114358",
"0.71427697",
"0.7107199",
"0.7078907",
"0.69673705",
"0.680161",
"0.6781092",
"0.6773353",
"0.67545366",
"0.67071134",
"0.67013216",
"0.6655128",
"0.6633259",
"0.6628792",
"0.66243255",
"0.65700173",
"0.65391093",
"0.6483531",
"0.6468535",
"0.6466622",
"0.6430311",
"0.6416139",
"0.64148295",
"0.6408351",
"0.63954794",
"0.63886905",
"0.63817406",
"0.63816625",
"0.6367288"
] |
0.8261746
|
1
|
Run this function to produce output/discounted_values.csv: 100 gamma (discount factor) values for each node in data/treeNodePolicyIncludingN=1.csv.
|
def values_per_gamma():
keys, data = parse("data/treeNodePolicyIncludingN=1.csv")
gammas = [1] + [round(0.01*i,2) for i in range(100)]
with open('output/discounted_values.csv', mode='w') as out_file:
out_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
out_writer.writerow(['world', 'node', 'child', 'value'] + ['dv_'+str(gamma) for gamma in gammas[1:]])
for d in data:
values = [round(node_value(d[keys['world']], d[keys['child']], gamma),3) for gamma in gammas]
out_writer.writerow([d[keys['world']], d[keys['node']], d[keys['child']]] + values)
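A standalone check of the gamma grid built above (no dependence on parse or node_value): the list holds 101 entries, where the leading 1 feeds the plain 'value' column and the remaining 100 values 0.00-0.99 feed the dv_* columns.

gammas = [1] + [round(0.01 * i, 2) for i in range(100)]
print(len(gammas), gammas[:4], gammas[-1])  # 101 [1, 0.0, 0.01, 0.02] 0.99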
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")",
"def experiment1_outliers():\n\tdata_folder = \"ckan_subset/prepared_learnset/\"\n\ttest_folder = 'ckan_subset/testset/xml_csv/'\n\tgm = Graph_Maker()\n\tgm.store()\n\trounds = 5\n\tx = [\"Fingerprint\", \"Syntax Feature Model\", \"Word2Vec Matcher\"]\n\t\n\tnumber_of_classes = 15\n\texamples_per_class = 0\n\taccuracies = []\n\tprecisions = []\n\trecalls = []\n\tfmeasures = []\n\tsf_main = Storage_Files(data_folder, classes)\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\n\tfor i in range(0, rounds):\n\t\tprint(\"Fingerprint\")\n\t\t# --- Fingerprint\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Fingerprint', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Fingerprint_Matcher', {'feature_main': 'fingerprint'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\t\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"SFM\")\n\t\t# --- Syntax Feature Model\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Syntax_Feature_Model', [sf_main, 1, 0, False, False])\n\n\t\tccc.add_matcher('matcher', 'Syntax_Matcher', {'feature_main': 'syntax'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"W2V\")\n\t\t# --- Word2Vec Matcher\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Corpus', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Word2Vec_Matcher', {'feature_main': 'corpus'}) # main classifier\n\t\tsm = 
Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\tgm.add_x(x)\n\t# accuracies = [0.4, 0.4, 0.4]\n\t# precisions = [0.5, 0.5, 0.5]\n\t# recalls = [0.62, 0.62, 0.62]\n\t# fmeasures = [0.23, 0.23, 0.28]\n\tgm.append_y(accuracies)\n\tgm.append_y(precisions)\n\tgm.append_y(recalls)\n\tgm.append_y(fmeasures)\n\tgm.store()\n\tsubtitle = \"Scores were averaged over \" + str(rounds) + \" tests with \" + str(len(classes)) + \" classes. \" + \\\n\t\"Number of simulated columns per class: \" + str(number_of_classes)\n\tlabels = [\"Accuracy\", \"Precision\", \"Recall\", \"F-Measure\"]\n\tgm.plot_bar_n(\"Matcher Type\", \"Score\", \"Accuracy of Matchers\", labels, subtitle=subtitle)",
"def start(key, value, p):\r\n graph = nx.read_edgelist(value, comments='%', create_using=nx.Graph(), nodetype=int)\r\n\r\n columns = ['Percentage', 'Degree', 'EigenVector']\r\n df = pd.DataFrame(columns=columns)\r\n methods = {'Degree': SampledGraphDegree, 'EigenVector': SampledGraphEigenVector, 'Katz': SampledGraphKatz}\r\n\r\n x = [2 * t for t in range(0, 10)]\r\n df['Percentage'] = x\r\n # failure_p = 0.5 DEFAULT\r\n # failure_p = Utils.failure_probability(graph)\r\n # failure_p = 0.1\r\n # failure_p = 0.3\r\n deg = Utils.failure_probability(graph)\r\n failure_p = [0.1, 0.3, 0.5, 0.7, deg]\r\n for fp in failure_p:\r\n for method in methods:\r\n if method != 'Katz':\r\n g = methods[method](graph, fp)\r\n s = Simulation(g)\r\n y = s.run_simulation()\r\n df[method] = y\r\n else:\r\n lam = Utils.largest_eigenvalue(graph)\r\n spectral_radius = 1.0 / lam\r\n # alpha_values = {'0.1': 0.1 * spectral_radius, '0.5': 0.5 * spectral_radius}\r\n # 0.00001\r\n # alpha_values = {'0.00001': 0.00001 * spectral_radius, '0.1': 0.1 * spectral_radius}\r\n alpha_values = {'0.1': 0.1 * spectral_radius}\r\n for alpha in alpha_values:\r\n g = methods[method](graph, fp, alpha_values[alpha])\r\n s = Simulation(g)\r\n y = s.run_simulation()\r\n mk = method + '_' + alpha\r\n df[mk] = y\r\n # writer = ExcelWriter(p + \"\\Results/%s.xlsx\" % key)\r\n # df.to_excel(writer, 'Sheet5')\r\n # writer.save()\r\n if fp == deg:\r\n df.to_csv(path_or_buf=(p + \"\\Results/%s_degree.csv\" % key), index=False)\r\n else:\r\n df.to_csv(path_or_buf=(p + \"\\Results/%s_%s.csv\" % (key, fp)), index=False)",
"def generateStats(self):\n\t\tn = float(self.n)\n\t\tm = float(self.m)\n\t\tk = float(self.k)\n\t\tp_fp = math.pow(1.0 - math.exp(-(k*n)/m), k)\n\t\tprint \"Probability of false positives: \", p_fp\n\t\tprint \"Predicted false positive rate: \", p_fp * 100.0\n\t\tprint \"Number of elements entered in filter: \", n\n\t\tprint \"Number of bits in filter: \", m\n\t\tprint \"Number of hashes in filter: \", k",
"def calc_std_nDCG_AP_corpus_smoothing(p):\n \n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n k_val = 50\n NDCG_AP_all_claims_all_param_values = read_pickle(measures_res+\"NDCG_AP_prec_at_k_all_claims_all_param_values_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:clm,alpha_f,beta_f,k_val,lambda_f val nDCG_score,AP_score\n each_params_AVGnDCG_MAP_dict = read_pickle(measures_res+\"each_params_AVGnDCG_MAP_prec_at_k_dict_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:alpha_f,beta_f,k_val,lambda_f\n nDCG_MAP_std = {} #key is a configuration quadruplet, value is the std of the measures\n \n \n \n# for k_val in top_k_docs_values:\n for alpha in range(0,11,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f) = turn_to_float([alpha,beta])\n curr_AP_var = 0\n curr_nDCG_var = 0\n curr_prec_at_5_var = 0\n curr_prec_at_10_var = 0\n for clm in claim_list:\n curr_nDCG_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][0] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][0])**2\n curr_AP_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][1] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][1])**2\n curr_prec_at_5_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][2] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][2])**2\n curr_prec_at_10_var +=(NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][3] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][3])**2\n curr_nDCG_std = float(float(math.sqrt(curr_nDCG_var))/float(len(claim_list)))\n curr_AP_std = float(float(math.sqrt(curr_AP_var))/float(len(claim_list)))\n curr_prec_at_5_std = float(float(math.sqrt(curr_prec_at_5_var))/float(len(claim_list)))\n curr_prec_at_10_std =float(float(math.sqrt(curr_prec_at_10_var))/float(len(claim_list)))\n nDCG_MAP_std[alpha_f,beta_f,k_val,lambda_f] = (curr_nDCG_std,curr_AP_std,curr_prec_at_5_std,curr_prec_at_10_std)\n save_pickle(measures_res+\"nDCG_MAP_prec_at_k_std_for_each_configuration_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), nDCG_MAP_std)",
"def main() -> None:\r\n\r\n with open('main/NLP/LDA/IHE_RESULTS/scopus_prediction_results.json', 'r') as f:\r\n results = json.load(f)\r\n\r\n lst = {}\r\n cols = []\r\n for i in range(20):\r\n lst[str(i)] = []\r\n cols.append(str(i))\r\n\r\n for i in range(20):\r\n for doi, vals in results.items():\r\n if vals[str(i)] >= THRESHOLD:\r\n lst[str(i)].append(doi)\r\n\r\n generate_csv(lst, cols, \"main/NLP/LDA/IHE_RESULTS/pub_analyse_20.csv\")",
"def evaluate(out_dict, n):\n out = dict()\n for key, entry in out_dict.items():\n out[key] = dict()\n for it_count, data in entry.items():\n total = 0.\n count = 0\n for x_list in data.values():\n total += analytic_value_VaR(x_list[-1])\n count += 1\n out[key][it_count] = total / count\n np.save('normal_out_all_cvar_%d.npy' % n, out)\n print(out)",
"def summarize(data, verbal=False, using_files=True):\n\n if using_files:\n for file_name in tqdm(data):\n fill_table(pd.read_csv(file_name))\n else:\n for table in tqdm(data):\n fill_table(table)\n\n for cluster in table_summary:\n #total_genes = sum(table_summary[cluster][\"phylum\"].values) # number of genes\n #total_genes = table_summary[cluster][\"N\"] # number of samples\n total_genes = table_summary[cluster][\"eggNOG\"].eggNOG.sum() # number of genes in COGs with duplicates\n \n phylum_percent = table_summary[cluster][\"phylum\"].apply(lambda x: x/total_genes * 100)\n phylum_percent.columns = [\"percent\"]\n table_summary[cluster][\"phylum\"] = pd.concat([table_summary[cluster][\"phylum\"],phylum_percent],axis=1)\n\n #Read above for fix\n genus_percent = table_summary[cluster][\"genus\"].apply(lambda x: x/total_genes * 100)\n genus_percent.columns = [\"percent\"]\n table_summary[cluster][\"genus\"] = pd.concat([table_summary[cluster][\"genus\"],genus_percent],axis=1)\n\n #read above for fix\n cog_percent = table_summary[cluster][\"eggNOG\"].apply(lambda x: x/table_summary[cluster][\"gene_cog\"] * 100)\n cog_percent.columns = [\"percent\"]\n table_summary[cluster][\"eggNOG\"] = pd.concat([table_summary[cluster][\"eggNOG\"],cog_percent],axis=1)\n\n #Print the data\n if verbal:\n print \"Cluster %s:\\n\" % cluster\n print \"Number of Samples: %d\\n\" % table_summary[cluster][\"N\"]\n print \"Taxonomy:\"\n print table_summary[cluster][\"phylum\"].sort(\"percent\", ascending=False)\n print \"----------------------------------\"\n print table_summary[cluster][\"genus\"].sort(\"percent\", ascending=False)\n print \"-----------------------------------\"\n print \"COGS:\"\n print table_summary[cluster][\"eggNOG\"].sort(\"percent\", ascending=False)\n print \"------------------------------------\"\n print \"End Summary\"",
"def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities",
"def caculate_prob(self):\n t_H = self.tree.depth()\n t_h = 1\n while(t_h <= t_H):\n t_hnodes = self.get_h(t_h)\n t_sum = 0\n t_hpro = []\n t_cpro = []\n for t_n in t_hnodes:\n t_sum = self.tree.get_node(t_n).data[0] + t_sum\n t_node = self.tree.get_node(t_n)\n if t_node.is_leaf():\n t_node.data.append(0)\n continue\n t_childrens = self.tree.children(t_n)\n t_shang = 0\n for child in t_childrens:\n t_shang = t_shang + (child.data[0]/t_node.data[0])*np.log(child.data[0]/t_node.data[0])\n t_node.data.append(-t_shang)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n t_parentnode = self.tree.parent(t_n)\n if t_h > 1:\n t_node.data.append((t_node.data[0] / t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n t_hpro.append((t_node.data[0]/t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n else:\n t_node.data.append((t_node.data[0] / t_sum))\n t_hpro.append((t_node.data[0] / t_sum))\n\n t_cpro.append(t_node.data[1])\n t_ndata = np.array(t_hpro)\n mean = np.mean(t_ndata)\n std = np.std(t_ndata,ddof=1)\n t_sdata = np.array(t_cpro)\n mean_s = np.mean(t_sdata)\n std_s = np.std(t_sdata,ddof=1)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n if(std != 0):\n t_node.data[2] = (t_node.data[2] - mean)/std\n else:\n t_node.data[2] = (t_node.data[2] - mean)\n if(mean_s == 0 and std_s ==0):\n t_node.data[1] = -100.0\n continue\n t_node.data[1] = (t_node.data[1] - mean_s)/std_s\n t_h = t_h + 1",
"def main():\n\n # confusion matrix model ensemble\n df = pd.read_csv('pred_test_ensemble.csv')\n print('Real test accuracy:', accuracy_score(df.labels.values, df.class_preds.values))\n conf_matrix = confusion_matrix(df.labels.values, df.class_preds.values, labels=[0, 1, 2, 3])\n\n dct = {'': [0, 90, 180, 270]}\n for i in range(4):\n dct[str(i*90)] = conf_matrix[:, i]\n \n conf_matrix = pd.DataFrame(dct)\n print(conf_matrix)\n conf_matrix.to_csv('confusion_matrix_ensemble.csv', index=False)\n\n\n\n # # Statistical gama\n # df = pd.read_csv('pred_test.csv')\n # print('Statistical... ')\n # statistical = gama_statistic(df)\n # statistical.to_csv('gama_statistic.csv', index=False)\n # print(statistical)",
"def prep_tree_data(self, number: int):\n filename = \"data-before-normalization-{}-out-of-7.csv\".format(number)\n path = str(DATA_PATH.joinpath(\"data-splitted\", filename))\n df = pandas.read_csv(path)\n\n df.drop(df.columns[0], axis=1, inplace=True)\n assessments = [x for x in df.columns.values if x.split(\"_\")[0] == \"assessment\"]\n df['average_score'] = df[assessments].mean(skipna=True, axis=1)\n for assessment in assessments: # somehow he doesn't want to fillna in a batch?\n df[assessment].fillna(df['average_score'], inplace=True)\n clicks = [x for x in df.columns.values if x.split(\"_\")[0] == \"vle\"]\n df['vle_click_average'] = df[clicks].mean(skipna=True, axis=1)\n for click in clicks: # somehow he doesn't want to fillna in a batch?\n df[click].fillna(df['vle_click_average'], inplace=True)\n df.dropna()\n\n self.change_oh_cat(\"gender\", df)\n self.change_oh_cat(\"highest_education\", df)\n self.change_oh_cat(\"imd_band\", df)\n self.change_oh_cat(\"age_band\", df)\n self.change_oh_cat(\"disability\", df)\n result_order = {'final_result__Fail': 0, 'final_result__Withdrawn': 2,\n 'final_result__Pass': 1, 'final_result__Distinction': 3}\n self.change_oh_cat(\"final_result\", df, result_order)\n df[\"final_result\"].replace(2, 0, inplace=True)\n df[\"final_result\"].replace(3, 1, inplace=True)\n\n target = df[\"final_result\"]\n df.drop([\"final_result\"], axis=1, inplace=True)\n\n x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.1,\n random_state=32, shuffle=True,\n stratify=target)\n\n return x_train, x_test, y_train, y_test",
"def generate_data(self,\n num_batches,\n num_samples,\n percentile):\n\n xs = []\n ys = []\n ws = []\n\n for j in range(num_batches):\n\n # sample designs from the prior\n z = tf.random.normal([num_samples, self.latent_size])\n q_dx = self.q_vae.decoder.get_distribution(z, training=False)\n p_dx = self.p_vae.decoder.get_distribution(z, training=False)\n\n # evaluate the score and importance weights\n x = q_dx.sample()\n y = self.ensemble.get_distribution(x).mean()\n log_w = p_dx.log_prob(x)[..., tf.newaxis] - \\\n q_dx.log_prob(x)[..., tf.newaxis]\n while len(log_w.shape) > 2:\n log_w = tf.reduce_sum(log_w, axis=1)\n\n xs.append(x)\n ys.append(y)\n ws.append(tf.math.exp(log_w))\n\n # locate the cutoff for the scores below the percentile\n gamma = tfp.stats.percentile(ys, percentile)\n\n for j in range(num_batches):\n\n # re-weight by the cumulative probability of the score\n d = self.ensemble.get_distribution(xs[j])\n ws[j] *= 1.0 - d.cdf(tf.fill([num_samples, 1], gamma))\n\n return tf.concat(xs, axis=0), \\\n tf.concat(ys, axis=0), \\\n tf.concat(ws, axis=0)",
"def main():\n\trelations = [json.loads(x) for x in open('tutorial/pdtb_trial_data.json')]\n\toutput_relations = [convert_to_output(x) for x in relations]\n\toutput_relations[1]['Connective']['TokenList'] = [0]\n\toutput_relations[3]['Arg1']['TokenList'].pop(4)\n\toutput_relations[4]['Arg2']['TokenList'].pop(4)\n\toutput_relations[5]['Arg2']['TokenList'].pop(4)\n\toutput_relations[6]['Sense'] = [u'Contingency.Condition'] # This will hurt sense recall\n\toutput_relations.pop(0) # This will hurt all precision\n\tscorer.evaluate(relations, output_relations)\n\treturn output_relations",
"def do_a_series_of_propagations(self):\n index = 0\n header = 'col1'\n while index < self.rounds and self.flag:\n index = index + 1\n print(\"\\nLabel propagation round: \" + str(index)+\".\\n\")\n self.do_a_propagation()\n print(index)\n with open ('./data/lpa.txt','w') as f:\n for i in self.nodes:\n f.write(str(self.labels[i])+'\\t')\n f.write(str(i)+'\\t')\n f.write('\\n')\n \n print(\"\")\n print(\"Modularity is: \" + str(round(modularity( self.labels,self.graph,0.2), 40)) + \".\\n\")\n json_dumper(self.labels, self.args.assignment_output)",
"def create_silver_labels(file_path, k, l):\n with open(file_path + \"allCountries.txt\", 'r') as cf:\n countries = [line.strip() for line in cf.readlines()]\n with open(file_path + \"label_counts_evid_pair.csv\", \"r\") as f:\n reader = csv.reader(f)\n with open(\"results/\" + \"/countries/evalRes_\" + str(k) + \"_\" + str(l) + \".csv\", \"w+\", newline='') as rf:\n csv.writer(rf).writerow([\"adjective\", \"Step 2 label\", \"FC label\", \"rand label\"])\n with open(\"results/\" + \"/ethnicities/evalRes_\" + str(k) + \"_\" + str(l) + \".csv\", \"w+\", newline='') as rf:\n csv.writer(rf).writerow([\"adjective\", \"Step 2 label\", \"FC label\", \"rand label\"])\n header = next(reader)\n for row in reader:\n print(row)\n country, adj1, adj2, pos_sup, neg_sup, neut_sup, pos_ref, neg_ref = row\n if country in countries:\n res_path = \"results/countries/evalRes_\"\n else:\n res_path = \"results/ethnicities/evalRes_\"\n if country == \"NoPrem\":\n continue\n\n pos_sup = int(pos_sup)\n neg_sup = int(neg_sup)\n pos_ref = int(pos_ref)\n neg_ref = int(neg_ref)\n\n # classes: 0 neutral corr, 1 negative corr, 2 positive corr\n class_label1 = 0\n class_label2 = 0\n\n # classify with decision boundaries\n if pos_sup > neg_sup + k:\n class_label1 = 2\n else:\n if pos_ref > neg_ref + l:\n class_label1 = 1\n if neg_ref > pos_ref + l:\n class_label2 = 1\n else:\n if neg_sup > pos_sup + k:\n class_label2 = 2\n rand_label1 = random.randint(1, 2)\n rand_label2 = random.randint(1, 2)\n with open(res_path + str(k) + \"_\" + str(l) + \".csv\", \"a\", newline='') as rf:\n writer = csv.writer(rf)\n writer.writerow([country + \"_\" + adj1, \"2\", class_label1, rand_label1])\n writer.writerow([country + \"_\" + adj2, \"1\", class_label2, rand_label2])",
"def generateStatisticsForStep3():\n os.chdir( config.CORPUS_DIRECTORY )\n invertedIndex = {}\n with open('Step3.csv') as f:\n fileList = csv.reader( f )\n for entry in fileList:\n postingList = list(ast.literal_eval(entry[2]))\n invertedIndex[(entry[0], int(entry[1]))]= [ int(x) for x in postingList]\n generateFreqGraph( invertedIndex )\n generatePostingGapGraph( invertedIndex )",
"def output_cost(run_name):\n\tf = open(toggles.OUTPUT_PATH + run_name + '_sample_cost.csv', 'a')\n\n\tfor p in toggles.CHOSEN_PREDS:\n\t\tpred = Predicate.objects.all().get(pk=p+1)\n\t\tf.write(pred.question.question_text + '\\n')\n\t\tavg_cost = 0.0\n\t\tnum_finished = 0.0\n\n\t\tfor ip in IP_Pair.objects.filter(predicate=pred, status_votes=5):\n\t\t\tcost = ip.num_yes + ip.num_no\n\t\t\tif cost%2 == 1:\n\t\t\t\tavg_cost += cost\n\t\t\t\tnum_finished += 1\n\t\t\tf.write(ip.item.name + ': ' + str(cost) + ', ')\n\n\t\tif num_finished != 0:\n\t\t\tavg_cost = avg_cost/num_finished\n\n\t\tf.write('\\n' + 'avg cost: ' + str(avg_cost) + ', calculated selectivity: ' + str(pred.calculatedSelectivity) + '\\n \\n')\n\tf.write('\\n')\n\tf.close()",
"def evaljointbayes(fname):\n #read file and save lines to contents\n contents = []\n f = open(fname)\n for line in f:\n randomVar = line.rstrip().split()\n if randomVar[0] != 'END':\n contents.append(randomVar)\n print \"1. Read file\", fname, \"successfully.\"\n f.close()\n \n #count numbers of nodes and probabilities in each line\n length = len(contents)\n nodes, prob = [0] * length, [0] * length\n table = [] #save all probabilities for each node \n for num in range(0, length):\n tableline = []\n for i in contents[num]:\n try:\n j = float(i)\n prob[num] += 1\n tableline.append(j) \n except ValueError:\n if i != 'NONE':\n nodes[num] += 1 \n table.append(tableline) \n \n #print out the joint distribution formular\n print \"2. The joint distribution using this network is:\"\n nodelist = []\n for line in contents:\n nodelist.append(line[0])\n print \"P(\", printElement(nodelist), \") = \"\n for num in range(0, length):\n line = contents[num]\n if nodes[num] == 1:\n print \"P(\", line[0], \")\", \n else: \n print \"P(\", line[0], '|', printElement(line[1:nodes[num]]),\\\n \")\", \n if num == length - 1:\n print ' '\n else: \n print ' * ', \n \n #print out the full joint distribution table \n###This is the revised version using recursive calls and###\n###print out the cpt table to a .txt file###\n #further revision includes deleting depth by pop() table, contents and nodes\n #also, I can extract the parents in the previous step, then contents will not be used here\n fo=open(fname+'.zz.txt','w')\n result = 1.0\n depth = 0\n global additions, multiplications\n additions, multiplications = 0, 0\n fullCPT(nodelist, [], result, depth, fo, contents, table, nodes)\n fo.close()\n\n #print out result of step 3 \n print \"3. Additions and multiplications needed to calculate\",\\\n \"the joint distribution is:\", additions, \"and\", multiplications\n print \"The number of nodes in the network is: \", length\n\n #print out reselt of step 4\n spaceFull = int(math.pow(2,length)) - 1\n spaceBN = sum(prob)\n print \"4. Space this network saved is (Compactness): \", spaceBN, \"/\", \\\n spaceFull, \"=\", float(spaceBN) / float(spaceFull), '\\n'\n return",
"def _ion_densities_datafiles(self):\n ne = self.ne_in\n nD = self.ni_in[0,:]\n nC = (ne-nD)/6.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC",
"def find_entropy(less_than_threshold,more_than_threshold):\n\n ''' Storing total number of records '''\n total_records = len(less_than_threshold) + len(more_than_threshold)\n\n ''' Calculating the probability '''\n less_than_probability = len(less_than_threshold) / total_records\n more_than_probability = len(more_than_threshold) / total_records\n\n ''' Converting the dataframe to numpy arrays '''\n less_than_threshold_values = less_than_threshold.values\n more_than_threshold_values = more_than_threshold.values\n\n ''' Storing the target attribute values (Muffin or Cupcake) for threshold values '''\n target_for_less_than = less_than_threshold_values[:, -1]\n target_for_more_than = more_than_threshold_values[:, -1]\n\n ''' Finding the counts of muffin and cupcake for values lower than and greater than threshold value '''\n recipe_type, less_than_cupcake_muffin_count = np.unique(target_for_less_than, return_counts=True)\n recipe_type, more_than_cupcake_muffin_count = np.unique(target_for_more_than, return_counts=True)\n\n # print(recipe_type, more_than_cupcake_muffin_count, len(more_than_cupcake_muffin_count))\n ''' To ensure there are at least 5 records in each node '''\n if less_than_cupcake_muffin_count.sum() < 5 or more_than_cupcake_muffin_count.sum() < 5:\n ''' Return horrible badness '''\n return math.inf\n else:\n ''' Find the entropies for less than threshold values and more than threshold values '''\n less_than_entropy = sum((less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()) * - np.log2(\n less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()))\n more_than_entropy = sum((more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()) * - np.log2(\n more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()))\n\n ''' Calculate the total weighted entropy '''\n total_weighted_entropy = less_than_probability * less_than_entropy + more_than_probability * more_than_entropy\n\n return total_weighted_entropy",
"def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()",
"def mc_policy_evaluation(env, policy, Q_value, n_visits, gamma=0.9):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n episode = generate_episode(env, policy)\n # print(episode)\n returns = generate_returns(episode, gamma=gamma)\n # print(returns)\n visit_flag = np.zeros((nS, nA))\n ############################\n # YOUR IMPLEMENTATION HERE #\n \n for i in range(0, len(episode)): #for each state,action,reward in the episode\n # print(\"sub episode: i = \" + str(i))\n s_t, a_t, r_t = episode[i] \n \n if visit_flag[s_t, a_t] == 0: # if the current state, action has not been visited previously in this episode\n # print(\"state: \" + str(s_t) + \", action: \" + str(a_t))\n # print(\"Q = \" + str(Q_value))\n # print(\"returns = \" + str(returns))\n \n visit_flag[s_t, a_t] = 1 # set flag to 1 (so we don't revisit the state again in this episode)\n n_visits[s_t, a_t] += 1 # increase the total number of visits to the state in ALL episodes\n Q_value[s_t,a_t] = Q_value[s_t,a_t] + 1/n_visits[s_t, a_t] * (returns[i] - Q_value[s_t,a_t]) # update Q value\n\n ############################\n return Q_value, n_visits",
"def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"md\",\n type=int,\n help=\"maximum depth\")\n parser.add_argument(\"mls\",\n type=int,\n help=\"minimum leaf samples\")\n parser.add_argument(\"--xTrain\",\n default=\"q4xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q4yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q4xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q4yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n # create an instance of the decision tree using gini\n start = time.time()\n dt1 = DecisionTree('gini', args.md, args.mls)\n trainAcc1, testAcc1 = dt_train_test(dt1, xTrain, yTrain, xTest, yTest)\n print(\"GINI Criterion ---------------\")\n print(\"Training Acc:\", trainAcc1)\n print(\"Test Acc:\", testAcc1)\n dt = DecisionTree('entropy', args.md, args.mls)\n trainAcc, testAcc = dt_train_test(dt, xTrain, yTrain, xTest, yTest)\n print(\"Entropy Criterion ---------------\")\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n end = time.time()\n print(\"Time taken: \", end-start)",
"def generate_csv(self, context): # pylint: disable=R0912,R0914\n temp = self.percentage()\n total_state = self.unique_freq()\n offline_value = -1\n ghz_conversion = 1000000\n mhz_conversion = 1000\n with open(self.outfile, 'a+') as f:\n writer = csv.writer(f, delimiter=',')\n reader = csv.reader(f)\n # Create the header in the format below\n # workload name, iteration, state, A7 CPU0,A7 CPU1,A7 CPU2,A7 CPU3,A15 CPU4,A15 CPU5\n if sum(1 for row in reader) == 0:\n header_row = ['workload', 'iteration', 'state']\n count = 0\n for cluster, states_list in enumerate(self.currentstates_of_clusters):\n for dummy_index in range(len(states_list)):\n header_row.append(\"{} CPU{}\".format(self.corename_of_clusters[cluster], count))\n count += 1\n writer.writerow(header_row)\n if offline_value in total_state:\n total_state.remove(offline_value) # remove the offline state\n for i in sorted(total_state):\n temprow = []\n temprow.extend([context.result.spec.label, context.result.iteration])\n if \"state{}\".format(i) in self.idlestate_description:\n temprow.append(self.idlestate_description[\"state{}\".format(i)])\n else:\n state_value = float(i)\n if state_value / ghz_conversion >= 1:\n temprow.append(\"{} Ghz\".format(state_value / ghz_conversion))\n else:\n temprow.append(\"{} Mhz\".format(state_value / mhz_conversion))\n for j in range(self.device.number_of_cores * self.multiply_factor):\n temprow.append(\"{0:.3f}\".format(temp[\"cpu{}\".format(j)][i]))\n writer.writerow(temprow)\n check_off = True # Checking whether core is OFFLINE\n for i in range(self.device.number_of_cores * self.multiply_factor):\n temp_val = \"{0:.3f}\".format(temp[\"cpu{}\".format(i)][offline_value])\n if float(temp_val) > 1:\n check_off = False\n break\n if check_off is False:\n temprow = []\n temprow.extend([context.result.spec.label, context.result.iteration])\n temprow.append(\"OFFLINE\")\n for i in range(self.device.number_of_cores * self.multiply_factor):\n temprow.append(\"{0:.3f}\".format(temp[\"cpu{}\".format(i)][offline_value]))\n writer.writerow(temprow)",
"def main():\n\n args = get_args()\n rank_wanted = args.rank\n min_pct = args.min\n\n def lines(fh):\n for line in map(lambda s: s.rstrip('\\n'), fh):\n if line and not line.startswith('#'):\n yield line\n\n num_root, num_unclassified = 0, 0\n assigned = []\n for i, fh in enumerate(args.file, start=1):\n basename = os.path.basename(fh.name)\n print('{:3}: {}'.format(i, basename))\n\n reader = csv.DictReader(lines(fh), delimiter='\\t')\n for rec in reader:\n try:\n reads = int(rec['reads'])\n except:\n continue\n\n tax_name = rec['taxName'].strip()\n if tax_name == 'root':\n num_root = reads\n continue\n elif tax_name == 'unclassified':\n num_unclassified = reads\n continue\n elif rec['rank'] == rank_wanted:\n continue\n\n total_reads = num_root + num_unclassified\n if total_reads == 0:\n die('Failed to find root/unclassified')\n\n pct = reads / total_reads\n if min_pct and pct < min_pct:\n continue\n\n assigned.append({\n 'sample': basename,\n 'tax_id': rec['taxID'],\n 'tax_name': tax_name,\n 'pct': pct,\n 'reads': reads\n })\n\n if not assigned:\n die('No data!')\n\n df = pd.DataFrame(assigned)\n if args.dataout:\n df.to_csv(args.dataout, index=False)\n\n num_found = len(assigned)\n print('At a {}% found {} {}'.format(min_pct, num_found, rank_wanted))\n if num_found > 1000:\n die('Too many to plot')\n\n x = df['sample']\n y = df['tax_name']\n plt.figure(figsize=(5 + len(x.unique()) / 5, len(y.unique()) / 3))\n plt.scatter(x, y, s=df['pct'], alpha=0.5)\n plt.xticks(rotation=45, ha='right')\n plt.gcf().subplots_adjust(bottom=.4, left=.4)\n plt.ylabel('Organism')\n plt.xlabel('Sample')\n if args.title:\n plt.title(args.title)\n\n plt.savefig(args.outfile)\n\n print('Done, see outfile \"{}\"'.format(args.outfile))\n\n if args.open_image:\n plt.show()",
"def get_data(self): \n self.improvement = []\n self.corrsq = []\n for filename in onlyfiles:\n mst = MST(filename, mypath=mypath)\n mst.estimate_correct_seqences()\n mst.estimate_improvement()\n self.mst.append(mst)\n\n self.corrsq.append(mst.corrsq)\n self.improvement.append(mst.improvement)\n\n\n\n print(f\"cor = {improvement}\")\n print(f\"improvement = {improvement}\")\n print(f\"mittelwert der improvement = {np.mean(improvement)}\")\n print(f\"Standardabweichung der lersteigung = {np.std(improvement)}\")",
"def get_dist(conditional,name):\n \n for leavetype in conditional:\n\n #print(leavetype)\n\n # Subset Data\n d1 = d[eval(conditional[leavetype])]\n \n # Get distribution\n temp = d1.groupby('length')['fixed_weight'].sum()\n out = temp.cumsum()/temp.sum()\n \n # Save estimates to file\n temp = {'length': np.array(out.index.values), 'cdf': np.array(out)}\n df = pd.DataFrame(temp)\n df.to_csv(\"./estimates/length_\"+name+ \"_\" +leavetype + '.csv',index=False,header=True)",
"def log_evaluation(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8',\r\n\t\t\t\tfloat_format='%.3f', index=False)",
"def calEachCrossflowAllAxialNode():\n AxialNodeno = 14 # axial node number in CFD data\n Nodes = []\n base = 'Node'\n for i in range(0, AxialNodeno):\n Nodes.append(base+str(i))\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n lateralFactors = []\n for node in Nodes:\n lateralFactors.append(crossFlow[node]/0.8)\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for factors in lateralFactors:\n for index in gapsToFlipIndex:\n factors[index] = -factors[index] \n #note: lateralFactors is a list of list\n \n #below calculate factors averaged over all subchannels\n crossFlowAveFactor = crossFlow.apply(abs).mean(axis = 0)/0.8\n lateralFactorsAvelist = []\n for i in range(0,14):\n base = []\n for j in range(0,24):\n base.append(crossFlowAveFactor[i])\n lateralFactorsAvelist.append(base)\n \n \n for i in range(0, 14):\n for j in range(0, 24):\n #note, in the original model there is only one sign for all source\n #terms in one sub-channel. therefore -- sign(crossFlow.iloc[j,2])\n lateralFactorsAvelist[i][j] = lateralFactorsAvelist[i][j] *sign(crossFlow.iloc[j,2]) \n for each in lateralFactorsAvelist:\n for index in gapsToFlipIndex:\n each[index] = -each[index] \n \n \n return lateralFactors, lateralFactorsAvelist"
] |
[
"0.57843393",
"0.5482357",
"0.533252",
"0.5228378",
"0.51823413",
"0.5179032",
"0.5170734",
"0.5143776",
"0.51375896",
"0.51173586",
"0.51173353",
"0.5109973",
"0.51017237",
"0.5076362",
"0.5057087",
"0.5021839",
"0.5016955",
"0.4993695",
"0.49849457",
"0.49696085",
"0.4953663",
"0.49358612",
"0.49287534",
"0.49285474",
"0.4924884",
"0.49183223",
"0.49125695",
"0.49115893",
"0.4902196",
"0.48971885"
] |
0.7776143
|
0
|
test access control is superuser as anonymous raises access control error
|
def test_access_control_is_superuser_as_anonymous_raises_access_control_error(
self,
):
# Arrange
mock_request = create_mock_request(user=self.anonymous_user)
# Act # Assert
with self.assertRaises(AccessControlError):
access_control_api.is_superuser(
mock_function, request=mock_request
)
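
For context, a guard like the one under test would typically inspect `request.user` and raise when the caller is not a superuser; a minimal sketch under that assumption (only `AccessControlError` and the call shape come from the snippet above, everything else here is hypothetical):

# Hypothetical sketch of the kind of check the test expects to fail for anonymous users.
class AccessControlError(Exception):
    pass

def is_superuser(func, request=None):
    user = getattr(request, "user", None)
    if user is None or not getattr(user, "is_superuser", False):
        raise AccessControlError("caller must be a superuser")
    return func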
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)",
"def testSuperUserPermission(self):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 200)",
"def test_superuser():\n assert os.geteuid() == 0, \"Need ROOT access in order to run tests.\"",
"def test_access_control_is_superuser_as_user_raises_access_control_error(\n self,\n ):\n # Arrange\n mock_request = create_mock_request(user=self.user1)\n\n # Act # Assert\n with self.assertRaises(AccessControlError):\n access_control_api.is_superuser(\n mock_function, request=mock_request\n )",
"def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()",
"def test_access_control_is_superuser_as_superuser_raises_access_control_error(\n self,\n ):\n # Arrange\n mock_request = create_mock_request(user=self.superuser)\n access_control_error = AccessControlError(\"\")\n\n # Act # Assert\n with self.assertRaises(AccessControlError):\n access_control_api.is_superuser(\n mock_function, access_control_error, request=mock_request\n )",
"def test_status_code_for_privileged_user(self):\n self.grant_permission()\n self.client.login(username=\"john\", password=\"pass\")\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, self.status_has_permission)",
"def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")",
"def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200",
"def test_superuser_permission_with_super_user(self):\n with self.settings(MAINTENANCE_MODE_PERMISSION_PROCESSORS=(\n 'maintenancemode.permission_processors.is_superuser',\n )):\n self.client.login(username='super_user', password='maintenance_pw')\n response = self.client.get('/')\n self.assertNormalMode(response)",
"def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )",
"def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')",
"def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)",
"def test_if_allowed_for_superusers_permissions(self):\r\n res = self.client_superuser.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)",
"def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)",
"def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')",
"def test_func(self):\n return self.request.user.is_superuser",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_user_can_change_superuser(self):\n self.assertTrue(self.story.user_can_change(self.superuser))",
"def test_authenticated_inherits_anonymous_permission(self):\n resource = Resource('milestone', 'milestone1')\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'anonymous', resource))\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'authenticated', resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',\n resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',\n resource))",
"def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)",
"def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)",
"def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )",
"def test_server_administrator():\n if is_server_administrator():\n return True\n raise False",
"def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()"
] |
[
"0.7597112",
"0.74517125",
"0.7409692",
"0.7355642",
"0.73188114",
"0.7184771",
"0.7074764",
"0.7070715",
"0.7055787",
"0.70354146",
"0.70207727",
"0.7013449",
"0.7003477",
"0.69959384",
"0.6991272",
"0.6991272",
"0.6991272",
"0.6991272",
"0.6985662",
"0.6980759",
"0.69780344",
"0.69575906",
"0.69575906",
"0.69175875",
"0.6915213",
"0.6890745",
"0.6882526",
"0.68583393",
"0.6857065",
"0.68017656"
] |
0.7768883
|
0
|
Map a task over a tree of values (a list of lists of ...). This is an example of a custom higher-order task (i.e. a task that takes another task as an argument).
|
def map_tree(a_task: Task, tree: Any) -> Any:
def map_node(node):
if isinstance(node, list):
return [map_node(child) for child in node]
else:
return a_task(node)
return map_node(tree)
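
A minimal usage sketch, assuming `a_task` can be any single-argument callable standing in for the framework's `Task` type (the `Task` and `Any` names would need to be imported or defined by the surrounding code):

# Hypothetical example: double every leaf of a nested list.
nested = [1, [2, 3], [[4], 5]]
doubled = map_tree(lambda x: x * 2, nested)
assert doubled == [2, [4, 6], [[8], 10]]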
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tree_map(fn, t):\n \"*** YOUR CODE HERE ***\"\n if t.is_leaf():\n return Tree(fn(t.root), [])\n map_tree = [tree_map(fn,i) for i in t.branches]\n return Tree(fn(t.root), mapped_subtrees)",
"def map_my(self, func: Callable[[Union[float, int]], int]) -> None:\n def list_func(lst: List[valueType]) -> List[valueType]:\n \"\"\"\n To apply the function/operation defined by users to every item in the list.\n :param lst: A list object like [element1, [element2, element3], element4].\n :return: A list that store the result of items after user-defined operation.\n \"\"\"\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp\n\n for head_node in self.hashTable:\n for node in head_node.singlyLinkedList:\n node.values = list_func(node.values)",
"def map_tree(t, fn):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return tree(fn(entry(t)))\n lst = []\n for subtree in subtrees(t):\n lst = lst + [map_tree(subtree, fn)]\n return tree(fn(entry(t)), lst)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def recursive_map(func, data):\n\n def recurse(item):\n return recursive_map(func, item)\n\n items_mapped = map_collection(recurse, data)\n return func(items_mapped)",
"def parallel_map(\n task,\n values,\n task_args=None,\n task_kwargs=None,\n num_cpus=None,\n progress_bar=None,\n):\n # TODO: if QuTiP's parallel_map catches up, we can remove this function,\n # and put QuTiP's parallel_map into __all__ to maintain krotov's interface.\n if task_args is None:\n task_args = ()\n if task_kwargs is None:\n task_kwargs = {}\n\n if num_cpus is None:\n num_cpus = multiprocessing.cpu_count()\n\n if progress_bar is None:\n progress_bar = BaseProgressBar()\n if progress_bar is True:\n progress_bar = TextProgressBar()\n\n progress_bar.start(len(values))\n nfinished = [0]\n\n def _update_progress_bar(x):\n nfinished[0] += 1\n progress_bar.update(nfinished[0])\n\n if USE_LOKY:\n Executor = LokyReusableExecutor\n if USE_THREADPOOL_LIMITS:\n Executor = partial(\n LokyReusableExecutor,\n initializer=_process_threadpool_limits_initializier,\n )\n else:\n Executor = ProcessPoolExecutor\n\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n with _threadpool_limits(limits=1):\n with Executor(max_workers=num_cpus) as executor:\n jobs = []\n try:\n for value in values:\n args = (value,) + tuple(task_args)\n job = executor.submit(task, *args, **task_kwargs)\n job.add_done_callback(_update_progress_bar)\n jobs.append(job)\n res = [job.result() for job in jobs]\n except KeyboardInterrupt as e:\n raise e\n\n progress_bar.finished()\n return res",
"def recursive_map(x, function):\n if isinstance(x, (list, tuple, set)):\n t = type(x)\n return t(map(lambda e: recursive_map(e, function), x))\n else:\n return function(x)",
"def task_mapper(task_function, task_iterable, parallel_procs=None):\n\n num_procs = get_num_processors(parallel_procs)\n\n if num_procs == 0:\n LOG.debug('Using serial task processor...')\n return serial_pc(task_function, task_iterable)\n else:\n LOG.debug('Using %d-parallel task processors...', num_procs)\n return parallel_pc(task_function, task_iterable, num_procs)",
"def _map_task(\n fn: Callable[[Iterator[Block]], Iterator[Block]], *blocks: Block\n) -> Iterator[Block]:\n output_metadata = []\n stats = BlockExecStats.builder()\n for b_out in fn(iter(blocks)):\n m_out = BlockAccessor.for_block(b_out).get_metadata([], None)\n m_out.exec_stats = stats.build()\n output_metadata.append(m_out)\n yield b_out\n stats = BlockExecStats.builder()\n yield output_metadata",
"def map(self, callable, iterable):\n iterable = executor.get_actual_value(iterable)\n return super(Executor, self).map(callable, iterable)",
"def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result",
"def collect_tasks_fn(\n tn: TraversalNode, data: Dict[CollectionAddress, GraphTask]\n ) -> None:\n if not tn.is_root_node():\n data[tn.address] = GraphTask(tn, resources)",
"def collect_tasks_fn(\n tn: TraversalNode, data: Dict[CollectionAddress, GraphTask]\n ) -> None:\n if not tn.is_root_node():\n data[tn.address] = GraphTask(tn, resources)",
"def map(self, func):\n return List(map(func, self))",
"def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))",
"def task_wrapper(\n self, key: str, task: Task, executor: \"TaskGraphExecutor\"\n ) -> Callable[[Task], Task]:",
"def flatmap(iterable, function_to_list):\n for element in iterable:\n list_block = function_to_list(element)\n for result_value in list_block:\n yield result_value",
"def recursive_map(fkt, lst):\n return [recursive_map(fkt,item) if type(item) is list else fkt(item) for item in lst]",
"def list_map(data, function):\n return list(map(function, data))",
"def with_tree(fun: Callable) -> Callable:\n\n def handle_tree(*args, **kwargs):\n\n # get the task tree\n global task_tree\n\n # create the code object that gets executed\n code = Code(fun, inspect.getcallargs(fun, *args, **kwargs))\n\n task_tree = TaskTreeNode(code, parent=task_tree)\n\n # Try to execute the task\n try:\n task_tree.status = TaskStatus.CREATED\n task_tree.start_time = datetime.datetime.now()\n result = task_tree.code.execute()\n\n # if it succeeded set the flag\n task_tree.status = TaskStatus.SUCCEEDED\n\n # iff a PlanFailure occurs\n except PlanFailure as e:\n\n # log the error and set the flag\n logging.exception(\"Task execution failed at %s. Reason %s\" % (str(task_tree.code), e))\n task_tree.reason = e\n task_tree.status = TaskStatus.FAILED\n raise e\n finally:\n # set and time and update current node pointer\n task_tree.end_time = datetime.datetime.now()\n task_tree = task_tree.parent\n return result\n\n return handle_tree",
"def mapper(fun: Callable[[str], Pin], /) -> None:",
"def pfmap(func, workers=8):\n return fmap(func)",
"def recursive_map(iterable, callable, recursion_condition=None):\n recursion_condition = recursion_condition or is_iterable\n res = general_new(iterable)\n\n callable_nargs = len(inspect.getargspec(callable).args) - inspect.ismethod(callable)\n if callable_nargs == 0 or callable_nargs > 2:\n raise RuntimeError(\"`callable` should be a one or two argument function\")\n\n for k, v in general_iterator(iterable):\n if recursion_condition(v):\n res = general_append(\n res,\n k,\n recursive_map(\n callable(v),\n callable,\n recursion_condition\n )\n )\n else:\n\n if callable_nargs == 1:\n v = callable(v)\n else:\n v = callable(k, v)\n\n res = general_append(res, k, v)\n\n return res",
"def tmap(function, *sequences, **tqdm_kwargs):\n for i in tzip(*sequences, **tqdm_kwargs):\n yield function(*i)",
"def map(self, fn, inv_fn):\r\n\t\treturn MapProjectedList(self, [fn], [inv_fn])",
"def map(self, map_function, *map_arguments) -> None:\n\n elements = []\n self.__get_sorted_elements(self.__root, elements)\n\n for element in elements:\n map_function(element, *map_arguments)"
] |
[
"0.6279525",
"0.62746316",
"0.6270376",
"0.6263661",
"0.6263661",
"0.6263661",
"0.6263661",
"0.6263661",
"0.6094713",
"0.60795045",
"0.60553247",
"0.59218323",
"0.5794635",
"0.57860726",
"0.578406",
"0.5725684",
"0.5725684",
"0.5725526",
"0.5705644",
"0.56726545",
"0.5658746",
"0.5635389",
"0.55926573",
"0.55789167",
"0.5576021",
"0.55700886",
"0.5567847",
"0.55631286",
"0.55396235",
"0.5536837"
] |
0.8091863
|
0
|
Extract a patch of size `dim` centered on `pos`. Patches may not be centered on `pos` if `allow_shifts=True`.
|
def get_patch(self, pos, dim, allow_shifts=False):
assert(len(pos)==3 and len(dim)==3)
patch = None
# Is the patch contained within the bounding box?
if allow_shifts:
box = containing_box(pos, dim, self._bbox)
else:
box = centered_box(pos, dim)
if self._bbox.contains(box):
box.translate(-self._offset) # Local coordinate system
vmin = box.min()
vmax = box.max()
patch = np.copy(self._data[...,vmin[0]:vmax[0],
vmin[1]:vmax[1],
vmin[2]:vmax[2]])
return patch
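
A minimal call sketch, assuming `volume` is an instance of the class that owns this method, with `self._data` holding an array whose last three axes are spatial; `containing_box` and `centered_box` are helpers from the surrounding codebase and are only assumed here to build an axis-aligned box from a position and a size:

# Hypothetical call: request a 32x32x32 patch centered at voxel (40, 50, 60).
patch = volume.get_patch(pos=(40, 50, 60), dim=(32, 32, 32))
if patch is None:
    # The centered box fell outside the bounding box; retry allowing shifts.
    patch = volume.get_patch(pos=(40, 50, 60), dim=(32, 32, 32), allow_shifts=True)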
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def patch(self, patch_center, patch_size, expand_patch=True):\n # x-axis corresponds to the columns of the image\n # y-axis corresponds to the rows of the image\n padding_x = int(patch_size[1]/2)\n padding_y = int(patch_size[0]/2)\n\n min_x = patch_center[0, 0] - padding_x\n max_x = patch_center[0, 0] + padding_x + patch_size[1] % 2\n min_y = patch_center[1, 0] - padding_y\n max_y = patch_center[1, 0] + padding_y + patch_size[0] % 2\n\n # Initialize the patch with 0.0\n patch = np.zeros(patch_size + self._image.shape[2:], dtype=np.float32)\n\n # Save some space by creating local copies with single letter names\n h, w = self.height, self.width\n\n # If the patch is inside the image boundaries return it as it is\n if min_x >= 0 and min_y >= 0 and max_x <= w and max_y <= h:\n patch[:, :] = self._image[min_y:max_y, min_x:max_x]\n\n # otherwise copy part (or nothing) from the image into the empty patch\n elif expand_patch:\n p_min_x = min(w, max(0, min_x))\n p_max_x = max(0, min(w, max_x))\n p_min_y = min(h, max(0, min_y))\n p_max_y = max(0, min(h, max_y))\n\n s_min_x = min(patch_size[1], max(0, 0 - min_x))\n s_max_x = max(0, min(patch_size[1], patch_size[1] + w - max_x))\n s_min_y = min(patch_size[0], max(0, 0 - min_y))\n s_max_y = max(0, min(patch_size[0], patch_size[0] + h - max_y))\n\n patch[s_min_y:s_max_y, s_min_x:s_max_x] = \\\n self._image[p_min_y:p_max_y, p_min_x:p_max_x]\n else:\n patch.fill(-1.)\n\n return patch",
"def patch_from_3d(self, point, patch_size, expand_patch=True):\n patch_center = self.project(point)\n\n return self.patch(patch_center, patch_size, expand_patch)",
"def extract_patch_from_img(array, patch_index, patch_size, z_offset=0, mean=None, std=None):\n patch_index[0] -= z_offset\n patch_index[1] -= z_offset\n\n z, x, y = array.shape\n ww = [patch_size[0], patch_size[1], patch_size[2]]\n\n ret = np.zeros(ww)\n temp_patch_index = np.array(patch_index).copy()\n ww = [0, patch_size[0], 0, patch_size[1], 0, patch_size[2]]\n\n # if patch overlaps image boundry (needs 0 padding) offset image index\n if temp_patch_index[0] < 0:\n ww[0] -= temp_patch_index[0]\n temp_patch_index[0] = 0\n if temp_patch_index[2] < 0:\n ww[2] -= temp_patch_index[2]\n temp_patch_index[2] = 0\n if temp_patch_index[4] < 0:\n ww[4] -= temp_patch_index[4]\n temp_patch_index[4] = 0\n\n if temp_patch_index[1] > z:\n ww[1] -= temp_patch_index[1] - z\n temp_patch_index[1] = z\n if temp_patch_index[3] > x:\n ww[3] -= temp_patch_index[3] - x\n temp_patch_index[3] = x\n if temp_patch_index[5] > y:\n ww[5] -= temp_patch_index[5] - y\n temp_patch_index[5] = y\n if temp_patch_index[0] >= temp_patch_index[1]:\n temp_patch_index[0] = temp_patch_index[1] - 1\n\n insert = array[temp_patch_index[0]:temp_patch_index[1],\n temp_patch_index[2]:temp_patch_index[3],\n temp_patch_index[4]:temp_patch_index[5]]\n\n # normalize patch\n if not (mean is None or std is None):\n insert = np.divide(insert - mean, std)\n\n ret[ww[0]:ww[1], ww[2]:ww[3], ww[4]:ww[5]] = insert\n\n return ret",
"def get_random_patch(dims, patch_size, rand_state: Optional[np.random.RandomState] = None):\n\n # choose the minimal corner of the patch\n rand_int = np.random.randint if rand_state is None else rand_state.randint\n min_corner = tuple(rand_int(0, ms - ps) if ms > ps else 0 for ms, ps in zip(dims, patch_size))\n\n # create the slices for each dimension which define the patch in the source array\n return tuple(slice(mc, mc + ps) for mc, ps in zip(min_corner, patch_size))",
"def get_patches(image, label, coordmaps, sample, num_pos = 100, num_neg = 100, all_patches=False, patch_shape= (48,48,48), spacing=(24,24,24), start_idx = 0):\n image_shape = np.shape(image)\n cn_size = image_shape[0]\n sg_size = image_shape[1]\n cr_size = image_shape[2]\n ax_size = image_shape[3]\n\n if not all_patches:\n idx_pos = np.stack(np.where(label[0, ...] > 0))\n \n # Only include points not near boundary\n #sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n #idx_pos = idx_pos[:,sg_idx[0]]\n #cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n #idx_pos = idx_pos[:, cr_idx[0]]\n #ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n #idx_pos = idx_pos[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_pos[0].shape[0], num_pos, replace = False)\n cpts_pos_sampled = idx_pos[:, idx_rand] \n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n for i in range(num_pos):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + i)\n \n # For negative points\n idx_neg = np.stack(np.where(label[0, ...]==0), axis = 0)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n idx_neg = idx_neg[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n idx_neg = idx_neg[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n idx_neg = idx_neg[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_neg[0].shape[0], num_neg, replace = False)\n cpts_neg_sampled = idx_neg[:, idx_rand] \n \n for i in range(num_neg):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, 
label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + num_pos + i)\n \n cpts = np.concatenate((cpts_pos_sampled, cpts_neg_sampled), axis = 1)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, cpts, start_idx + num_pos + i\n\n else:\n \n idx = p.grid_center_points(image.shape[1:], spacing)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx[0]) & (idx[0] < (sg_size - (patch_shape[0]/2))))\n idx = idx[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx[1]) & (idx[1] < (cr_size - (patch_shape[1]/2))))\n idx = idx[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx[2]) & (idx[2] < (ax_size - (patch_shape[2]/2))))\n idx = idx[:, ax_idx[0]]\n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n \n for i in range(idx.shape[1]):\n \n idx1_sg = idx[0][i] - int(patch_shape[0]/2)\n idx1_cr = idx[1][i] - int(patch_shape[1]/2)\n idx1_ax = idx[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, idx, len(image_patch_list)",
"def extract_patch(self, patch_radius, full_x, full_y, full_i, full_j):\n com_ijs = [self.center_of_mass_ij(time) for time in self.times]\n patch_grid = []\n patch_mask = []\n patch_x = []\n patch_y = []\n patch_i = []\n patch_j = []\n for t, time in enumerate(self.times):\n obj_slice_buff = (slice(com_ijs[t][0] - patch_radius, com_ijs[t][0] + patch_radius),\n slice(com_ijs[t][1] - patch_radius, com_ijs[t][1] + patch_radius))\n obj_slice_local = [[com_ijs[t][0] - self.i[t].min() - patch_radius,\n com_ijs[t][0] - self.i[t].min() + patch_radius],\n [com_ijs[t][1] - self.j[t].min() - patch_radius,\n com_ijs[t][1] - self.j[t].min() + patch_radius]]\n patch_i.append(full_i[obj_slice_buff])\n patch_j.append(full_j[obj_slice_buff])\n patch_x.append(full_x[obj_slice_buff])\n patch_y.append(full_y[obj_slice_buff])\n pad_i_l = abs(obj_slice_local[0][0]) if obj_slice_local[0][0] < 0 else 0\n pad_i_u = obj_slice_local[0][1] - self.timesteps[t].shape[0] \\\n if obj_slice_local[0][1] - self.timesteps[t].shape[0] > 0 else 0\n pad_j_l = abs(obj_slice_local[1][0]) if obj_slice_local[1][0] < 0 else 0\n pad_j_u = obj_slice_local[1][1] - self.timesteps[t].shape[1] \\\n if obj_slice_local[1][1] - self.timesteps[t].shape[1] > 0 else 0\n\n if obj_slice_local[0][0] < 0:\n obj_slice_local[0][0] = 0\n obj_slice_local[0][1] += pad_i_l\n if obj_slice_local[1][0] < 0:\n obj_slice_local[1][0] = 0\n obj_slice_local[1][1] += pad_j_l\n pad_grid = np.pad(self.timesteps[t], pad_width=[(pad_i_l, pad_i_l + pad_i_u), (pad_j_l, pad_j_l + pad_j_u)])\n pad_mask = np.pad(self.masks[t], pad_width=[(pad_i_l, pad_i_l + pad_i_u), (pad_j_l, pad_j_l + pad_j_u)])\n obj_slice_const = (slice(obj_slice_local[0][0], obj_slice_local[0][1]),\n slice(obj_slice_local[1][0], obj_slice_local[1][1]))\n patch_grid.append(pad_grid[obj_slice_const])\n patch_mask.append(pad_mask[obj_slice_const])\n patch_obj = STObject(patch_grid, patch_mask, patch_x, patch_y, patch_i, patch_j, self.start_time,\n self.end_time, step=self.step, dx=self.dx, u=self.u, v=self.v)\n return patch_obj",
"def _center(pos, shift):\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2",
"def get_patch(i,j,im,h=H): #X\n print(i,j)\n return im[(i-h):(i+h+1),(j-h):(j+h+1)]",
"def get_random_patch(\n dims: Sequence[int], patch_size: Sequence[int], rand_state: np.random.RandomState | None = None\n) -> tuple[slice, ...]:\n\n # choose the minimal corner of the patch\n rand_int = np.random.randint if rand_state is None else rand_state.randint\n min_corner = tuple(rand_int(0, ms - ps + 1) if ms > ps else 0 for ms, ps in zip(dims, patch_size))\n\n # create the slices for each dimension which define the patch in the source array\n return tuple(slice(mc, mc + ps) for mc, ps in zip(min_corner, patch_size))",
"def insert_patch_subpixel(img, patch, p):\n ths = patch.shape[0] / 2\n xpmin = p[0] - ths\n ypmin = p[1] - ths\n Ho = np.array([[1, 0, xpmin],\n [0, 1, ypmin],\n [0, 0, 1]], dtype=float)\n\n w = img.shape[0]\n h = img.shape[1]\n img2 = cv2.warpPerspective(patch, Ho, (h, w), dst=img,\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_TRANSPARENT)\n return img2",
"def get_image_patch_coord(size_x, size_y, patch_size):\n step = patch_size // 2\n nx = (size_x-patch_size) // step + 1\n ny = (size_y-patch_size) // step + 1\n patch_coord = np.ndarray(shape=(nx*ny, 2), dtype=np.int32)\n i = 0\n for y in range(0, size_y-patch_size, step):\n for x in range(0, size_x-patch_size, step):\n patch_coord[i] = [x, y]\n i += 1\n return patch_coord[:i]",
"def _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size,\n n_crops, h, w,\n c, scale_id, max_seq_len):\n p = tf.image.extract_patches(\n image, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1],\n [1, 1, 1, 1],\n padding='SAME')\n\n p = tf.reshape(p, [n_crops, -1, patch_size * patch_size * c])\n\n count_h = _ceil_divide_int(h, patch_stride)\n count_w = _ceil_divide_int(w, patch_stride)\n\n # Shape (num_patches, 1)\n spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)\n # Shape (1, num_patches, 1)\n spatial_p = tf.expand_dims(spatial_p, axis=0)\n # Shape (n_crops, num_patches, 1)\n spatial_p = tf.tile(spatial_p, (n_crops, 1, 1))\n spatial_p = tf.cast(spatial_p, dtype=p.dtype)\n # Shape (n_crops, num_patches, 1)\n scale_p = tf.ones_like(spatial_p, dtype=p.dtype) * scale_id\n # Shape (n_crops, num_patches, 1)\n mask_p = tf.ones_like(spatial_p, dtype=p.dtype)\n\n # Concatenating is a hacky way to pass both patches, positions and input\n # mask to the model.\n # Shape (n_crops, num_patches, patch_size * patch_size * c + 3)\n out = tf.concat([p, spatial_p, scale_p, mask_p], axis=2)\n if max_seq_len >= 0:\n out = _pad_or_cut_to_max_seq_len(out, max_seq_len)\n out = tf.reshape(out,\n [n_crops, max_seq_len, c * patch_size * patch_size + 3])\n else:\n out = tf.reshape(out, [n_crops, -1, c * patch_size * patch_size + 3])\n return out",
"def find_path(masked_image,start_pos, target_pos, size_compress_index, active_particle_size,\r\n compress = False):\r\n \r\n \r\n not_image = cv2.bitwise_not(masked_image)\r\n image_index = size_compress_index\r\n \r\n start_x,start_y = start_pos\r\n end_x, end_y = target_pos\r\n \r\n ker1=cv2.getStructuringElement(cv2.MORPH_RECT, (3,3),anchor =(-1,-1))\r\n not_image = cv2.dilate(not_image,ker1,iterations = active_particle_size//2)\r\n\r\n small_image = cv2.resize(not_image, (st_width//image_index, st_height//image_index),interpolation = cv2.INTER_AREA)\r\n ret,small_image = cv2.threshold(small_image,127,255,cv2.THRESH_BINARY)\r\n \r\n small_image = cv2.bitwise_not(small_image)\r\n # \r\n #cv2.imshow(\"thresh\", small_image)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows() \r\n \r\n \r\n matrix = small_image.tolist()\r\n grid = Grid(matrix=matrix)\r\n\r\n start = grid.node(int(start_x//image_index), int(start_y//image_index))\r\n end = grid.node(int(end_x//image_index), int(end_y//image_index))\r\n\r\n finder = AStarFinder(diagonal_movement = DiagonalMovement.never)\r\n path, runs = finder.find_path(start, end, grid)\r\n \r\n new_path = list()\r\n for p in path:\r\n x,y = p\r\n x = x*image_index\r\n y = y*image_index\r\n new_path.append((x,y))\r\n \r\n compressed_path = compress_path(new_path)\r\n \r\n if compress == True:\r\n res_path = compressed_path\r\n else:\r\n res_path = new_path\r\n \r\n return res_path, runs",
"def apply_patch_on_the_image(img, patch, count=5, offset=150):\n mask = np.zeros(shape=img.shape)\n boxes = []\n prev = (0, 0)\n gen = gencoordinates(img.shape[0], img.shape[1])\n for i in range(count):\n rnd = random.choice([x for x in range(100)])\n x_offset = rnd + patch.shape[0]\n y_offset = rnd + patch.shape[1]\n x_offset += prev[0]\n y_offset += prev[1]\n if y_offset < patch.shape[1]:\n y_offset = patch.shape[1]\n if x_offset < patch.shape[0]:\n x_offset = patch.shape[0]\n img[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = patch\n mask[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = 1\n boxes.append((y_offset, patch.shape[0], x_offset, patch.shape[1]))\n prev = (x_offset, y_offset)\n return img, mask, boxes",
"def iter_patch_slices(dims, patch_size, start_pos=()):\n\n # ensure patchSize and startPos are the right length\n ndim = len(dims)\n patch_size = get_valid_patch_size(dims, patch_size)\n start_pos = ensure_tuple_size(start_pos, ndim)\n\n # collect the ranges to step over each dimension\n ranges = tuple(starmap(range, zip(start_pos, dims, patch_size)))\n\n # choose patches by applying product to the ranges\n for position in product(*ranges[::-1]): # reverse ranges order to iterate in index order\n yield tuple(slice(s, s + p) for s, p in zip(position[::-1], patch_size))",
"def _center(pos: ArrayLike, shift: ArrayLike) -> Tuple[float, float]:\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2",
"def crop_workspace_heightmap(center_x, center_y,\n patch_size, heightmap_size,\n depth_heightmap, color_heightmap):\n if center_x > heightmap_size - patch_size / 2:\n center_x = heightmap_size - patch_size / 2\n if center_y > heightmap_size - patch_size / 2:\n center_y = heightmap_size - patch_size / 2\n grasp_patch_row_low = int(center_x - patch_size / 2)\n grasp_patch_col_low = int(center_y - patch_size / 2)\n\n if grasp_patch_row_low < 0:\n grasp_patch_row_low = 0\n elif grasp_patch_row_low >= (heightmap_size - patch_size):\n grasp_patch_row_low = heightmap_size - patch_size\n\n if grasp_patch_col_low < 0:\n grasp_patch_col_low = 0\n elif grasp_patch_col_low >= (heightmap_size - patch_size):\n grasp_patch_col_low = heightmap_size - patch_size\n depth_patch, color_patch = None, None\n if depth_heightmap is not None:\n depth_patch = depth_heightmap[grasp_patch_row_low: (grasp_patch_row_low + patch_size),\n grasp_patch_col_low: (grasp_patch_col_low + patch_size)]\n if color_heightmap is not None:\n color_patch = color_heightmap[grasp_patch_row_low: (grasp_patch_row_low + patch_size),\n grasp_patch_col_low: (grasp_patch_col_low + patch_size),\n :]\n return depth_patch, color_patch, grasp_patch_row_low, grasp_patch_col_low",
"def get_shell_point(patches, mu, rho):\n point = Point3D(0.0, 0.0, 0.0)\n for patch in patches:\n point += patch.get_corner(mu, rho)\n return point / len(patches)",
"def iter_patch_position(\n image_size: Sequence[int],\n patch_size: Sequence[int] | int | np.ndarray,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float | Sequence[int] | int = 0.0,\n padded: bool = False,\n):\n\n # ensure patchSize and startPos are the right length\n ndim = len(image_size)\n patch_size_ = get_valid_patch_size(image_size, patch_size)\n start_pos = ensure_tuple_size(start_pos, ndim)\n overlap = ensure_tuple_rep(overlap, ndim)\n\n # calculate steps, which depends on the amount of overlap\n if isinstance(overlap[0], float):\n steps = tuple(round(p * (1.0 - o)) for p, o in zip(patch_size_, overlap))\n else:\n steps = tuple(p - o for p, o in zip(patch_size_, overlap))\n\n # calculate the last starting location (depending on the padding)\n end_pos = image_size if padded else tuple(s - round(p) + 1 for s, p in zip(image_size, patch_size_))\n\n # collect the ranges to step over each dimension\n ranges = starmap(range, zip(start_pos, end_pos, steps))\n\n # choose patches by applying product to the ranges\n return product(*ranges)",
"def offset_mask(mask):\n def axis_data(axis):\n \"\"\"Gets the bounds of a masked area along a certain axis\"\"\"\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size\n\n xo,xs = axis_data(0)\n yo,ys = axis_data(1)\n\n array = mask[yo:yo+ys,xo:xo+xs]\n offset = (yo,xo)\n return offset, array",
"def get_mask(self, index):\n if not self.masked:\n return None\n \n x, y = self.location_of(index)\n # Do not take patches from out of bounds spaces\n #if x > img_w - self.size or y > img_h - self.size:\n # raise IndexError('Patch boundary out of bounds')\n \n patch = torch.from_numpy(self.mask[x : x + self.size, y : y + self.size])\n \n if self.transform:\n patch = self.transform(patch)\n return patch",
"def mask_region(self, ypos, xpos, r):\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r ** 2 and 0 <= j<= self.shapes[0] - 1 and 0<= i <=self.shapes[1] - 1:\r\n j = int(j)\r\n i = int(i)\r\n self.masked[j, i] = 0",
"def crop_center(im, ps):\n if not type(ps) == int:\n raise TypeError('INPUT ps must be a scalar')\n center = [s/2 for s in im.shape[:2]]\n el = [ps / 2, ps / 2] if ps % 2 == 0 else [ps / 2, ps/2 + 1] # edge length\n return(im[center[0] - el[0] : center[0] + el[1], center[1] - el[0] : center[1] + el[1], :])",
"def iter_patch(\n arr: NdarrayOrTensor,\n patch_size: Sequence[int] | int = 0,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n copy_back: bool = True,\n mode: str | None = NumpyPadMode.WRAP,\n **pad_opts: dict,\n) -> Generator[tuple[NdarrayOrTensor, np.ndarray], None, None]:\n\n from monai.transforms.croppad.functional import pad_nd # needs to be here to avoid circular import\n\n # ensure patchSize and startPos are the right length\n patch_size_ = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # set padded flag to false if pad mode is None\n padded = bool(mode)\n is_v = [bool(p) for p in ensure_tuple_size(patch_size, arr.ndim)] # whether a valid patch size provided\n _pad_size = tuple(p if v and padded else 0 for p, v in zip(patch_size_, is_v)) # pad p if v else 0\n _overlap = [op if v else 0.0 for op, v in zip(ensure_tuple_rep(overlap, arr.ndim), is_v)] # overlap if v else 0.0\n # pad image by maximum values needed to ensure patches are taken from inside an image\n if padded:\n arrpad = pad_nd(arr, to_pad=[(p, p) for p in _pad_size], mode=mode, **pad_opts) # type: ignore\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, _pad_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, _pad_size))\n else:\n arrpad = arr\n start_pos_padded = start_pos\n iter_size = arr.shape\n\n for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded, _overlap, padded=padded):\n # compensate original image padding\n if padded:\n coords_no_pad = tuple((coord.start - p, coord.stop - p) for coord, p in zip(slices, _pad_size))\n else:\n coords_no_pad = tuple((coord.start, coord.stop) for coord in slices)\n yield arrpad[slices], np.asarray(coords_no_pad) # data and coords (in numpy; works with torch loader)\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(_pad_size, arr.shape))\n arr[...] = arrpad[slices] # type: ignore",
"def patches_from_3d_points(self, points, patch_size):\n patch_centers = np.round(project(self.camera.P, points.T)).astype(int)\n\n patches = self.patches(patch_centers, patch_size)\n return patches",
"def get_masked_scene(orig, mask, local_context_size = 80, dilation=False):\n orig_scene = orig.copy()\n mask_scene = mask.copy()\n orig_scene_no_mask = orig.copy()\n \n mask_info = np.where(mask_scene == 0) \n min_x = max(min(mask_info[0]) - local_context_size, 0)\n max_x = max(mask_info[0]) + local_context_size\n min_y = max(min(mask_info[1]) - local_context_size, 0)\n max_y = max(mask_info[1]) + local_context_size\n \n orig_scene = orig_scene[min_x:max_x,min_y:max_y]\n orig_scene_no_mask = orig_scene_no_mask[min_x:max_x,min_y:max_y]\n mask_scene = mask_scene[min_x:max_x,min_y:max_y]\n \n dialation_mask = np.zeros(mask_scene.shape) + 255\n \n if dilation:\n dialation_mask = cv2.dilate(255-mask_scene, np.ones((local_context_size,local_context_size)))\n \n #implot(dialation_mask)\n #plt.imshow(dialation_mask, 'gray')\n \n for x in range(mask_scene.shape[0]):\n for y in range(mask_scene.shape[1]):\n if mask_scene[x, y] == 0:\n orig_scene[x, y, :] = 0\n orig_scene_no_mask[x,y,:] = 0\n if dilation:\n if dialation_mask[x,y] == 0:\n orig_scene[x, y, :] = 0\n \n return orig_scene, mask_scene, orig_scene_no_mask, dialation_mask",
"def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ):\n px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0]))\n l, t = np.meshgrid(\n np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]),\n np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) )\n l = l.ravel()\n t = t.ravel()\n x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1]))\n y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1]))\n return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l)",
"def find_patch0(self):\n orig_image = central_area_crop(self.outp1, crop_size=(128, 192, 160))\n array_shape = np.array(orig_image.shape) # (128, 192, 160)\n patch_shape = np.array([self.patch_size] * 3) # (128)\n space = np.array([16] * 2, dtype=np.uint8) # (8)\n patch_idx_limit = (array_shape[1:] - patch_shape[1:]) // space # (4, 2)\n # construct an array, then np.argmax()\n patches_array = np.zeros(patch_idx_limit)\n for patch_idx_y in range(patch_idx_limit[0]):\n for patch_idx_x in range(patch_idx_limit[1]):\n patch_idx = np.array([patch_idx_y, patch_idx_x])\n patch_start = space * patch_idx\n patch_end = space * patch_idx + np.array(patch_shape[1:])\n cropped_array = orig_image[:, patch_start[0]:patch_end[0], patch_start[1]:patch_end[1]]\n num_tumor_voxel = (cropped_array > 0).sum()\n\n patches_array[patch_idx_y, patch_idx_x] = num_tumor_voxel\n argsmax = np.argwhere(patches_array == patches_array.max())\n patch_idx = argsmax[np.random.randint(len(argsmax))]\n # best_patch_idx = np.unravel_index(patches_array.argmax(), patches_array.shape)\n\n # convert in coords in the whole image\n orig_shape = np.array([155, 240, 240])\n cur_shape = np.array([128, 192, 160])\n coord_diffs = (orig_shape - cur_shape) // 2\n patch0_START_pt = np.array((0, ) + tuple(patch_idx * space)) + coord_diffs\n return patch0_START_pt",
"def extract_patches(image, patchshape, overlap_allowed=0.1, cropvalue=None, crop_fraction_allowed=0.1):\r\n jump_cols = int(patchshape[1] * overlap_allowed)\r\n jump_rows = int(patchshape[0] * overlap_allowed)\r\n\r\n # Restrict ourselves to the rectangle containing non-cropped pixels\r\n if cropvalue is not None:\r\n rows, cols = np.where(image != cropvalue)\r\n rows.sort()\r\n cols.sort()\r\n active = image[rows[0]:rows[-1], cols[0]:cols[-1]]\r\n else:\r\n active = image\r\n\r\n rowstart = 0\r\n colstart = 0\r\n\r\n # Array tracking where we've already taken patches.\r\n covered = np.zeros(active.shape, dtype=bool)\r\n patches = []\r\n regions = []\r\n while rowstart <= active.shape[0] - patchshape[0]:\r\n # Record whether or not e've found a patch in this row,\r\n # so we know whether to skip ahead.\r\n got_a_patch_this_row = False\r\n colstart = 0\r\n while colstart <= active.shape[1] - patchshape[1]:\r\n # Slice tuple indexing the region of our proposed patch\r\n region = (slice(rowstart, rowstart + patchshape[0]),\r\n slice(colstart, colstart + patchshape[1]))\r\n\r\n # The actual pixels in that region.\r\n patch = active[region]\r\n\r\n # The current mask value for that region.\r\n cover_p = covered[region]\r\n if cropvalue is None or \\\r\n frac_eq_to(patch, cropvalue) <= crop_fraction_allowed and \\\r\n frac_eq_to(cover_p, True) <= overlap_allowed:\r\n # Accept the patch.\r\n patches.append(patch)\r\n regions.append(region)\r\n # Mask the area.\r\n covered[region] = True\r\n\r\n # Jump ahead in the x direction.\r\n colstart += jump_cols\r\n got_a_patch_this_row = True\r\n # print \"Got a patch at %d, %d\" % (rowstart, colstart)\r\n else:\r\n # Otherwise, shift window across by one pixel.\r\n colstart += 1\r\n\r\n if got_a_patch_this_row:\r\n # Jump ahead in the y direction.\r\n rowstart += jump_rows\r\n else:\r\n # Otherwise, shift the window down by one pixel.\r\n rowstart += 1\r\n\r\n # Return a 3D array of the patches with the patch index as the first\r\n # dimension (so that patch pixels stay contiguous in memory, in a\r\n # C-ordered array).\r\n return np.concatenate([pat[np.newaxis, ...] for pat in patches], axis=0),regions",
"def iter_patch(\n arr: np.ndarray, patch_size, start_pos=(), copy_back: bool = True, mode: str = \"wrap\", **pad_opts,\n):\n # ensure patchSize and startPos are the right length\n patch_size = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # pad image by maximum values needed to ensure patches are taken from inside an image\n arrpad = np.pad(arr, tuple((p, p) for p in patch_size), mode, **pad_opts)\n\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, patch_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, patch_size))\n\n for slices in iter_patch_slices(iter_size, patch_size, start_pos_padded):\n yield arrpad[slices]\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(patch_size, arr.shape))\n arr[...] = arrpad[slices]"
] |
[
"0.61094743",
"0.59083855",
"0.57726455",
"0.57071066",
"0.5649377",
"0.5638825",
"0.55698454",
"0.5552992",
"0.55500853",
"0.5524652",
"0.548065",
"0.5401572",
"0.5388224",
"0.5387365",
"0.5349579",
"0.5345006",
"0.5341573",
"0.5296573",
"0.5295754",
"0.521929",
"0.51876324",
"0.5149657",
"0.51462525",
"0.51416516",
"0.5124493",
"0.512174",
"0.512107",
"0.5120478",
"0.511174",
"0.5093104"
] |
0.76689285
|
0
|
Get a valid range for extracting patches of size `dim`.
|
def valid_range(self, dim):
assert(len(dim)==3)
if any([dim[i] > self._dim[i] for i in range(3)]):
return None
dim = Vec3d(dim)
top = dim // 2 # Top margin
btm = dim - top - (1,1,1) # Bottom margin
vmin = self._offset + top
vmax = self._offset + self._dim - btm
return Box(vmin, vmax)
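
As a point of reference, a minimal standalone sketch of the same margin arithmetic using plain numpy (the Vec3d/Box helpers used above are assumed to live elsewhere in that codebase; the function and variable names here are illustrative only):

import numpy as np

def valid_range_sketch(vol_dim, patch_dim, offset=(0, 0, 0)):
    # A patch of size `patch_dim` centered at p spans [p - top, p + btm], so the
    # valid centers form the half-open box [offset + top, offset + vol_dim - btm).
    vol_dim, patch_dim, offset = map(np.asarray, (vol_dim, patch_dim, offset))
    if np.any(patch_dim > vol_dim):
        return None
    top = patch_dim // 2          # top margin
    btm = patch_dim - top - 1     # bottom margin
    vmin = offset + top
    vmax = offset + vol_dim - btm
    return vmin, vmax

# valid_range_sketch((64, 64, 64), (16, 16, 16)) -> (array([8, 8, 8]), array([57, 57, 57]))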
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_bounds(dimension):\n bounds = np.tile(np.nan, [dimension, 2])\n bounds[:, 0], bounds[:, 1] = -10, 10\n return bounds",
"def which_patches(extent):\n # TODO check input\n ramin, ramax, decmin, decmax = extent\n p1 = which_patch(ramin, decmin) # lower left\n p2 = which_patch(ramax, decmin) # lower right\n p3 = which_patch(ramin, decmax) # upper left\n if not ((p1 >= 0) & (p2 >= 0) & (p3 >= 0)):\n patch_ids = []\n else:\n patch_ids = [range(y, y + p2 - p1 + 1) for y in range(p1, p3 + 9, 9)]\n return np.array(patch_ids).flatten()",
"def iter_patch_slices(dims, patch_size, start_pos=()):\n\n # ensure patchSize and startPos are the right length\n ndim = len(dims)\n patch_size = get_valid_patch_size(dims, patch_size)\n start_pos = ensure_tuple_size(start_pos, ndim)\n\n # collect the ranges to step over each dimension\n ranges = tuple(starmap(range, zip(start_pos, dims, patch_size)))\n\n # choose patches by applying product to the ranges\n for position in product(*ranges[::-1]): # reverse ranges order to iterate in index order\n yield tuple(slice(s, s + p) for s, p in zip(position[::-1], patch_size))",
"def get_random_patch(\n dims: Sequence[int], patch_size: Sequence[int], rand_state: np.random.RandomState | None = None\n) -> tuple[slice, ...]:\n\n # choose the minimal corner of the patch\n rand_int = np.random.randint if rand_state is None else rand_state.randint\n min_corner = tuple(rand_int(0, ms - ps + 1) if ms > ps else 0 for ms, ps in zip(dims, patch_size))\n\n # create the slices for each dimension which define the patch in the source array\n return tuple(slice(mc, mc + ps) for mc, ps in zip(min_corner, patch_size))",
"def get_random_patch(dims, patch_size, rand_state: Optional[np.random.RandomState] = None):\n\n # choose the minimal corner of the patch\n rand_int = np.random.randint if rand_state is None else rand_state.randint\n min_corner = tuple(rand_int(0, ms - ps) if ms > ps else 0 for ms, ps in zip(dims, patch_size))\n\n # create the slices for each dimension which define the patch in the source array\n return tuple(slice(mc, mc + ps) for mc, ps in zip(min_corner, patch_size))",
"def get_physical_bounds(dim):\n dim = basename(dim)\n\n if dim == \"coszen\":\n trunc_low = -1.\n trunc_high = 1.\n\n elif dim == \"energy\":\n trunc_low = 0.\n trunc_high = None\n\n elif dim == \"azimuth\":\n trunc_low = 0.\n trunc_high = 2*np.pi\n\n else:\n raise ValueError(\"No physical bounds for dimension '%s' available.\"%dim)\n\n return trunc_low, trunc_high",
"def dense_patch_slices(\n image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True\n) -> list[tuple[slice, ...]]:\n num_spatial_dims = len(image_size)\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = []\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1 if scan_dim is not None else 1)\n\n starts = []\n for dim in range(num_spatial_dims):\n dim_starts = []\n for idx in range(scan_num[dim]):\n start_idx = idx * scan_interval[dim]\n start_idx -= max(start_idx + patch_size[dim] - image_size[dim], 0)\n dim_starts.append(start_idx)\n starts.append(dim_starts)\n out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing=\"ij\")]).T\n if return_slice:\n return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]\n return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore",
"def extract_patches(data,patch_dim):\n \n m = data.shape[0]\n im_x = data.shape[1]\n im_y = data.shape[2]\n \n assert im_x%float(patch_dim)==0 and im_y%float(patch_dim)==0, \\\n \"patch_size must divide x and y dimensions of image\"\n\n numpatchs = m*(im_x/patch_dim)*(im_y/patch_dim)\n patch_size = patch_dim**2\n\n patches = np.empty((patch_size,numpatchs))\n p=0\n for i in range(data.shape[0]):\n image = data[i,...]\n for x in np.r_[0:im_x:patch_dim]:\n for y in np.r_[0:im_y:patch_dim]:\n patch = image[x:x+patch_dim,y:y+patch_dim]\n patches[:,p] = patch.ravel()\n p+=1\n \n return patches",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])",
"def get_chunk_slices(ds_dim, chunk_size):\n chunks = list(range(0, ds_dim, chunk_size))\n if chunks[-1] < ds_dim:\n chunks.append(ds_dim)\n else:\n chunks[-1] = ds_dim\n\n chunks = list(zip(chunks[:-1], chunks[1:]))\n\n return chunks",
"def scaleRanges(ranges, dims=(0,1,2)):\n max_pos_span = np.max([ranges[dim][1] - ranges[dim][0] for dim in\n dims])\n for k in ranges:\n ranges[k] = list(ranges[k])\n\n for dim in dims:\n midpoint = 0.5 * (ranges[dim][1] + ranges[dim][0])\n # import pdb; pdb.set_trace()\n ranges[dim][1] = midpoint + 0.5 * max_pos_span\n ranges[dim][0] = midpoint - 0.5 * max_pos_span",
"def get_index_range_inclusive(self):\n nx, ny, nz = self.get_mesh_size()\n return (1, nx, 1, ny, 1, nz)",
"def ripser_PDs_dim(data,dim=2):\n h_start = []; h_end = []\n value_range = eval(data[1].rstrip().split(' ')[-1])\n i=0\n l = data[i]\n stop = False\n while(l.strip()[-2:] !='%i:'%dim):\n i = i+1\n l = data[i]\n i = i +1 \n while(i<len(data) and stop == False):\n l = data[i]\n d = l.strip()\n print d\n if(d[0]=='['):\n d = d[1:-1]\n if( d.split(',')[-1] != ' '):\n p = map(float, d.split(','))\n h_start.append(p[0]); h_end.append(p[1])\n i = i+1\n else:\n d = d.split(',')\n h_start.append(float(d[0])); h_end.append(value_range[1])\n i = i+1\n else:\n stop = True\n \n return(h_start,h_end)",
"def gen_crop_area(x_res, y_res, dim):\n crop_area = []\n\n for x in range(math.floor(dim[0] / x_res)):\n for y in range(math.floor(dim[1] / y_res)):\n left = x * x_res\n right = left + x_res\n upper = y * y_res\n lower = upper + y_res\n crop_area.append((left, upper, right, lower))\n\n return crop_area",
"def getDataDimFullShiftRange(dataDim):\n\n from ccpnmr.analysis.core.ExperimentBasic import getPrimaryDataDimRef\n\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n shiftList = expDimRef.expDim.experiment.shiftList\n unit = shiftList.unit\n\n if expDimRef.minAliasedFreq is None:\n if unit == 'point':\n minShift = dataDim.numPointsOrig\n else:\n dataDimRef = getPrimaryDataDimRef(dataDim)\n minShift = unit_converter[('point',unit)](dataDim.numPointsOrig,dataDimRef)\n\n else:\n minShift = expDimRef.minAliasedFreq\n\n if expDimRef.maxAliasedFreq is None:\n if unit == 'point':\n maxShift = 0\n else:\n dataDimRef = getPrimaryDataDimRef(dataDim)\n maxShift = unit_converter[('point',unit)](0,dataDimRef)\n\n else:\n maxShift = expDimRef.maxAliasedFreq\n \n shiftRange = [minShift,maxShift]\n shiftRange.sort()\n \n return shiftRange",
"def bounds(self):\n b = []\n\n for dim in self.dimensions:\n if dim.size == 1:\n b.append(dim.bounds)\n else:\n b.extend(dim.bounds)\n\n return b",
"def get_peridym_mesh_bounds(mesh, struct_grd=False):\n if(struct_grd):\n cell_cent = structured_cell_centroids(mesh)\n max_edge_len = np.diff(cell_cent[0:2][:,0])\n range_fact = 2.001*max_edge_len \n else:\n cell_cent = get_cell_centroids(mesh)\n max_edge_len = mesh.hmax()\n range_fact = 1.5001*max_edge_len\n\n dim = len(cell_cent[0])\n corner_min, corner_max = get_domain_bounding_box(mesh)\n num_els = len(cell_cent)\n\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n bound_cents = {} #dict to store the node centroids corresponding to node numbers above\n\n for d in range(dim):\n \"\"\"\n index to direction along which the normal to boundary occurs:#\n 0 - x_min\n 1 - x_max\n 2 - y_min\n 3 : y_max\n 4 : z_min\n 5 : z_max\n Note: z-normal not applicable to 2d problems\n \"\"\"\n bound_range[2*d] = corner_min[d] + range_fact #min bound for d\n bound_range[2*d +1] = corner_max[d] - range_fact #max bound for d\n bound_nodes[(2*d)] = np.where(cell_cent[:,d] <= bound_range[2*d]) #node nums for min bound\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1]) # node nums for max bound\n\n bound_cents[(2*d)] = cell_cent[bound_nodes[2*d][0]] #node centroids for min bound\n bound_cents[(2*d+1)] = cell_cent[bound_nodes[2*d+1][0]] #node centroids for min bound\n\n return bound_nodes, bound_cents #convert list to np array ",
"def get_patch(self, pos, dim, allow_shifts=False):\n assert(len(pos)==3 and len(dim)==3)\n patch = None\n\n # Is the patch contained within the bounding box?\n if allow_shifts:\n box = containing_box(pos, dim, self._bbox)\n else:\n box = centered_box(pos, dim)\n\n if self._bbox.contains(box):\n box.translate(-self._offset) # Local coordinate system\n vmin = box.min()\n vmax = box.max()\n patch = np.copy(self._data[...,vmin[0]:vmax[0],\n vmin[1]:vmax[1],\n vmin[2]:vmax[2]])\n return patch",
"def fill_random_domain_bounds(lower_bound_interval, upper_bound_interval, dim):\n domain_bounds = numpy.empty((dim, 2))\n domain_bounds[..., 0] = numpy.random.uniform(lower_bound_interval.min, lower_bound_interval.max)\n domain_bounds[..., 1] = numpy.random.uniform(upper_bound_interval.min, upper_bound_interval.max)\n return ClosedInterval.build_closed_intervals_from_list(domain_bounds)",
"def ranges(self, predicate):\n\n x = np.zeros(len(self)).astype(np.bool)\n for i, elem in enumerate(self.elements):\n x[i] = predicate(elem)\n\n return np.where(x)[0]",
"def _find_masks(batch, min_size=10):\n result = []\n for b in batch:\n assert b.shape[0] == 1\n patch = b[0]\n z_sum = patch.sum(axis=(1, 2))\n coords = np.where(z_sum > min_size)[0]\n if len(coords) > 0:\n ind = coords[len(coords) // 2]\n result.append(b[:, ind:ind + 1, ...])\n else:\n ind = b.shape[1] // 2\n result.append(b[:, ind:ind + 1, ...])\n\n return np.stack(result, axis=0)",
"def in_bound(dim , s):\n if s <= -1:\n return 0\n elif s >= dim:\n return dim - 1\n else:\n return s",
"def ranged_axes(shape):\n return (-np.arange(1, len(shape) + 1)[::-1]).tolist() or -1",
"def find_range(reduced_dist_word_dim, range_limits):\n n_limits = len(range_limits)\n for limit in range(n_limits - 1):\n if (reduced_dist_word_dim > range_limits[limit]) and (reduced_dist_word_dim < range_limits[limit + 1]):\n return limit\n raise ValueError",
"def freqspace2(dim):\n (minval, maxval) = (-dim, dim-1) if (dim % 2 == 0) else (1-dim, dim)\n return np.asarray(range(minval,maxval,2)) / dim",
"def unit_bounds(dimension):\n\n return [-1.0, 1.0] * dimension",
"def valid_nd(shape, stride, kernel_size):\n rshape = []\n for sh, st, sz in zip(shape, stride, kernel_size):\n rshape.append(int(valid_x(sh, st, sz)))\n return rshape",
"def remove_bad_cells(self, *dims):\n ranges = [DimRange(d, 0, np.inf) for d in dims]\n return self.gate(*ranges)",
"def _set_dims(xs, ys, dmax):\n\n xmin = np.min(xs)\n xmax = np.max(xs)\n\n ymin = np.min(ys)\n ymax = np.max(ys)\n\n x_abs = np.abs(xmax - xmin)\n y_abs = np.abs(ymax - ymin)\n\n if x_abs > y_abs:\n step = x_abs / dmax\n x_dim_coords = np.arange(xmin + step, xmax + step, step)\n y_dim_coords = np.arange(ymin + step, ymax + step, step)\n else:\n step = y_abs / dmax\n y_dim_coords = np.arange(ymin + step, ymax + step, step)\n x_dim_coords = np.arange(xmin + step, xmax + step, step)\n\n # y_dim_coords must be flipped\n\n y_dim_coords = y_dim_coords[::-1]\n return x_dim_coords, y_dim_coords, [step, xmin, xmax, ymin, ymax]"
] |
[
"0.5927587",
"0.5761949",
"0.5760646",
"0.5629314",
"0.55331284",
"0.55257386",
"0.5446492",
"0.54096425",
"0.54078543",
"0.53363216",
"0.53345877",
"0.5310692",
"0.5300406",
"0.52919734",
"0.52734375",
"0.52633286",
"0.52361447",
"0.5209971",
"0.51845366",
"0.5177192",
"0.51746947",
"0.5147188",
"0.5145149",
"0.513869",
"0.5112654",
"0.5105489",
"0.5103117",
"0.5079914",
"0.506548",
"0.5053307"
] |
0.66113824
|
0
|
All festivals have been created with admin, so zero have been migrated.
|
def test_number_of_festivals_migrated(self):
festival_count = Festival.festivals.count()
self.assertEqual(festival_count, 0)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_all_festivals(self):\n self.cursor.execute(\"select * from festivals\")\n self.connection.commit()\n return self.cursor.fetchall()",
"def _reset_admin(self):\r\n DBSession.execute(\r\n \"UPDATE users SET activated='1' WHERE username='admin';\")\r\n Activation.query.delete()\r\n transaction.commit()",
"def migrate(self):\n\tpass",
"def post_migrations(self):",
"def test_can_return_all_current_features_only(self):\n returned_features = return_current_features()\n self.assertTrue(len(returned_features) > 0)\n for feature in returned_features:\n self.assertTrue(feature.is_feature)\n feature_admin_object = SuggestionAdminPage.objects.get(suggestion=feature)\n self.assertTrue(feature_admin_object.in_current_voting_cycle)\n\n all_current_features_admin = SuggestionAdminPage.objects.filter(suggestion__is_feature=True,\n in_current_voting_cycle=True)\n self.assertEqual(len(all_current_features_admin), len(returned_features))",
"def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)",
"def admin_actions():\n\n create_default_admin()\n return response('Admin account has been created', 201)",
"def insert_federal_accounts():\n\n # look for treasury_appropriation_accounts with no federal_account FK\n tas_no_federal_account = TreasuryAppropriationAccount.objects.filter(federal_account__isnull=True)\n\n if tas_no_federal_account.count() > 0:\n # there are tas records with no corresponding federal_account,\n # so insert the necessary federal_account records\n # to get the federal accounts title, we use the title from the tas\n # with the most recent ending period of availability (which is\n # coalesced to a string to ensure the descending sort works as expected)\n federal_accounts = (\n TreasuryAppropriationAccount.objects.values_list(\"agency_id\", \"main_account_code\", \"account_title\")\n .annotate(epoa=Coalesce(\"ending_period_of_availability\", Value(\"\")))\n .distinct(\"agency_id\", \"main_account_code\")\n .order_by(\"agency_id\", \"main_account_code\", \"-epoa\")\n .filter(treasury_account_identifier__in=tas_no_federal_account)\n )\n\n # create a list of the new federal account objects and bulk insert them\n fa_objects = [\n FederalAccount(\n agency_identifier=f[0] or \"\",\n main_account_code=f[1] or \"\",\n account_title=f[2] or \"\",\n federal_account_code=\"{}-{}\".format(f[0] or \"\", f[1] or \"\"),\n )\n for f in federal_accounts\n ]\n FederalAccount.objects.bulk_create(fa_objects)\n\n # now that the new account records are inserted, add federal_account\n # FKs to their corresponding treasury_appropriation_account records\n tas_update_list = [t.treasury_account_identifier for t in tas_no_federal_account]\n update_federal_accounts(tuple(tas_update_list))\n return len(fa_objects)\n else:\n return 0",
"def migration():",
"def set_random_festivals(self, num):\n try:\n self.cursor.execute(\"insert into festivals (place_id, name, date) \"\n \"select rand.place_id, rand.name, rand.date \"\n \"from (select places.id as place_id, \"\n \"md5(random()::text) as name, \"\n \"((current_date - '70 years'::interval) + trunc(random() * 365) * '1 day'::interval + trunc(random() * 3) * '1 year'::interval ) as date \"\n f\"from generate_series(1, 1), places ORDER BY random() limit {num}) as rand\")\n self.connection.commit()\n if self.cursor.rowcount:\n return \"generated festivals\"\n else:\n return \"NULL\"\n except(Exception, psycopg2.Error) as error:\n self.connect.rollback()\n print(\"error in generate\", error)",
"def add_admin(request):\n if request.POST:\n post = request.POST\n username = post.get(\"username\")\n first_name = post.get(\"first_name\")\n last_name = post.get(\"last_name\")\n email = post.get(\"email\")\n password = post.get(\"password\")\n chosen_hospitals = post.getlist(\"chosen_hospitals\")\n\n new_user = User.objects.create_user(\n username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n email=email\n )\n new_user_profile = UserProfile.objects.create(\n user=new_user,\n status=UserStatus.objects.get(pk=4)\n )\n\n if new_user and new_user_profile:\n for chosen_hospital in chosen_hospitals:\n HospitalStaff.objects.create(user_profile=new_user_profile, hospital=Hospital.objects.get(pk=chosen_hospital))\n\n return redirect('add_admin')\n\n hospitals = Hospital.objects.all()\n\n return render(request, 'add_admin.html', {'hospitals': hospitals})",
"def update_festival_details(self):\n self.compute_festivals()\n self.assign_relative_festivals()",
"def transferfunds(self):",
"def test_admin_user(self):\n user = self.template_users['staff_user']\n self.client.login(email=user['email'], password=user['password'])\n\n # Admins can see everything\n response = self.client.get(reverse('api:log-list'))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], self.object_count)\n\n # Deletion should be possible\n response = self.client.post(reverse('api:log-erase'), {\n 'before': str(timezone.now()),\n 'max_severity': LogEntry.ERROR,\n })\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['deleted'], self.object_count)\n self.assertEqual(LogEntry.objects.count(), 0)",
"def defineEstadosFinaisAFD(self):\n\n for e in self.estadosFinais:\n for e_AFD in self.afd.estados:\n if e in e_AFD and e_AFD not in self.afd.estadosFinais:\n self.afd.estadosFinais.append(e_AFD)",
"def test_funders_created(self):\n # Currently, there is 1 ProjectFunding object in the database\n org_existing = OrganizationFactory(name='Existing Organization')\n funder_existing = ProjectFundingFactory()\n funder_existing.sources.add(org_existing)\n self.assertEqual(ProjectFunding.objects.count(), 1)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The project_ouessant1 has 2 funders, and the project_ouessant2 has 1 funder.\n # The funder_existing also still exists.\n self.assertEqual(ProjectFunding.objects.count(), 4)\n self.assertEqual(project_ouessant1.funding.count(), 2)\n self.assertEqual(\n ProjectFunding.objects.filter(\n project=project_ouessant1,\n amount=100,\n currency='USD',\n ).count(),\n 1\n )\n self.assertEqual(\n ProjectFunding.objects.filter(\n project=project_ouessant1,\n amount=200,\n currency='RUB',\n ).count(),\n 1\n )\n self.assertEqual(project_ouessant2.funding.count(), 1)\n self.assertEqual(\n ProjectFunding.objects.filter(\n project=project_ouessant2,\n amount=None,\n currency=None,\n ).count(),\n 1\n )\n self.assertEqual(project_liaoning.funding.count(), 0)\n # The org_existing is funding 3 Projects: the one for the funder_existing,\n # the project_ouessant1 and the project_ouessant2\n self.assertEqual(org_existing.projectfunding_set.count(), 3)",
"def reset_db_danger():\n from flask.ext.migrate import init, migrate\n # Remove the migration folder if exist\n if os.path.exists('migrations'):\n shutil.rmtree('migrations')\n\n # Remove the sqlite database files if exist\n for fl in glob.glob('*.sqlite'):\n os.remove(fl)\n\n # Reset Migration Database\n init()\n\n # migrate database to latest revision\n migrate(message='init')",
"def get_fees(self):\n return self.fees",
"def _update_suspicion_0(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier",
"def _update_suspicion_0(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier",
"def confirm_meal(request, e_id):\n enrolment = Enrolment.objects.get(pk=e_id)\n total_meal = enrolment.day_meal_count + enrolment.night_meal_count\n price = enrolment.plan.price\n extended_user = ExtendedUser.objects.get(user=request.user)\n extended_user.balance -= price * total_meal\n if extended_user.balance >= 0:\n extended_user.save()\n owner = enrolment.plan.store.owner\n owner = ExtendedUser.objects.get(user=owner)\n owner.balance += price * total_meal\n owner.save()\n return view_enrolments(request)",
"def setUp(self):\n # ensure there is no data in the test database when the test starts\n db.session.commit()\n db.drop_all()\n db.create_all()\n usRoles = [\"Guest\",\"Couple\",\"2nd line\",\"Wedding party\"]\n\n for i in usRoles:\n roleAdd = User_roles(role = i)\n db.session.add(roleAdd)\n db.session.commit()\n\n # create test admin user\n admin = User(first_name=\"admin\", last_name=\"admin\",permission=\"Couple\", email=\"[email protected]\", password=\"admin2016\")\n\n # create test non-admin user\n employee = User(first_name=\"test\", last_name=\"user\",permission = \"Guest\", email=\"[email protected]\", password=\"test2016\")\n\n # save users to database\n db.session.add(admin)\n db.session.add(employee)\n db.session.commit()",
"def setup_general():\n Role.insert_roles()\n admin_query = Role.query.filter_by(name='Administrator')\n if admin_query.first() is not None:\n if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:\n user = Employee(first_name='Admin',\n last_name='Account',\n password=Config.ADMIN_PASSWORD,\n email=Config.ADMIN_EMAIL)\n db.session.add(user)\n db.session.commit()\n print('Added administrator {}'.format(user.full_name()))",
"def reset(self):\n Show.objects.all().delete()\n User.objects.exclude(is_superuser=True).delete()",
"def re_migration_check(self):\r\n if self.hh_id == 'Migrated':\r\n self.mig_years += 1/73\r\n\r\n prob = math.exp(-1.2 + 0.06 * float(self.age) - 0.08 * self.mig_years)\r\n re_mig_prob = prob / (prob + 1)\r\n if random.random() < re_mig_prob: # re-migration occurs\r\n self.migration_status = 0\r\n self.hh_id = self.past_hh_id\r\n self.mig_years = 0\r\n from land import household_income_list\r\n household_income_list[self.past_hh_id] -= self.mig_remittances\r\n hh_size_list[self.hh_id] += 1\r\n hh_migration_flag[self.hh_id] = 0\r\n if self.hh_id not in total_re_migration_list:\r\n total_re_migration_list[self.hh_id] += 1\r\n if self.unique_id == former_hoh_list[self.hh_id]:\r\n self.resource_frequency = self.resource_frequency * 2\r\n if 15 < int(self.age) < 59:\r\n self.work_status = 1\r\n num_labor_list[self.hh_id] += 1\r\n labor_list.append(self.unique_id)\r\n total_migration_list[self.hh_id] = 0",
"def test_none_admin_get_all(self):\n\n with self.client:\n token = self.customer()\n response = self.client.get(\n 'api/v1/meals', headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)",
"def test_consultants_created(self):\n # Currently, there is just 1 Organization in the database, the org_existing\n org_existing = OrganizationFactory(name='Existing Organization')\n self.assertEqual(Organization.objects.count(), 1)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The CSV file mentions 1 consultant for the project_liaoning\n self.assertEqual(set(project_liaoning.consultants.all()), set([org_existing]))\n for project in [project_ouessant1, project_ouessant2]:\n self.assertEqual(project.consultants.count(), 0)",
"async def clear(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n with SQLCursor(self.db) as cur:\n cur.execute('UPDATE govt_info SET officer = Null;')\n await ctx.message.channel.send(\n 'Successfully cleared all officers from all positions in the SQLite table.')\n else:\n await ctx.message.channel.send('Hey! You do not have permission to do that.')",
"def test_get_all_superuser_permissions(self):\n user = self.UserModel._default_manager.get(pk=self.superuser.pk)\n self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))",
"def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()"
] |
[
"0.58556074",
"0.52402407",
"0.5173738",
"0.513151",
"0.5100251",
"0.50860196",
"0.50360054",
"0.50225085",
"0.5019569",
"0.5018516",
"0.5003454",
"0.4987341",
"0.494545",
"0.4918814",
"0.49040917",
"0.4880145",
"0.48653144",
"0.48578307",
"0.48517138",
"0.48111472",
"0.48005313",
"0.47993964",
"0.47983795",
"0.47952768",
"0.4781703",
"0.47734767",
"0.47651652",
"0.47494784",
"0.4745857",
"0.4744598"
] |
0.66384596
|
0
|
Calculate the character-level accuracy between 2 strings
|
def calculate_ac(str1, str2):
    # Character-level accuracy of str2 (e.g. an OCR output) against str1 (the reference),
    # computed as the harmonic mean (F1) of the match ratios w.r.t. each string's length.
    total_letters = len(str1)
    ocr_letters = len(str2)
    if total_letters == 0 and ocr_letters == 0:
        # Two empty strings count as a perfect match.
        acc_by_char = 1.0
        return acc_by_char
    # Count the characters covered by the matching blocks found by difflib.
    diff = difflib.SequenceMatcher(None, str1, str2)
    correct_letters = 0
    for block in diff.get_matching_blocks():
        correct_letters = correct_letters + block[2]
    if ocr_letters == 0:
        acc_by_char = 0
    elif correct_letters == 0:
        # Guard against a zero denominator in the harmonic mean below.
        acc_by_char = 0
    else:
        acc_1 = correct_letters / total_letters
        acc_2 = correct_letters / ocr_letters
        acc_by_char = 2 * (acc_1 * acc_2) / (acc_1 + acc_2)
    return float(acc_by_char)
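
A quick illustrative call (the strings here are made up; difflib is the only dependency, and it is what the function above already uses):

import difflib  # required by calculate_ac above

gt = "hello world"    # hypothetical ground-truth string
ocr = "hello w0rld"   # hypothetical OCR output
print(calculate_ac(gt, ocr))  # matching blocks cover 10 of 11 chars -> ~0.909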
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _calculate_accuracy(self):\n same = 0\n dif = 0\n for x, y in zip(self.test_string[3:], self.prediction[3:]):\n if x == y:\n same += 1\n else:\n dif += 1\n\n accuracy = round((same / (same + dif)) * 100, 2)\n print(f'Computer guessed right {same} out of {same + dif} symbols ({accuracy} %)')\n self.capital += dif\n self.capital -= same\n\n return",
"def text_proximity(str_1: str, str_2: str) -> float:\n tokens_1 = Counter(str_1.split(' '))\n tokens_2 = Counter(str_2.split(' '))\n return _normalized_scalar_product(tokens_1, tokens_2)",
"def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score",
"def compare_strings(string1: str, string2: str) -> float:\n return SequenceMatcher(None, string1, string2).ratio()",
"def calculate_score(s1, s2, l1, l2, startpoint):\n\n matched = \"\" # to hold string displaying alignments\n score = 0\n for i in range(l2):\n if (i + startpoint) < l1:\n if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\" # * indicates a match\n score = score + 1\n else:\n matched = matched + \"-\" # - indicates no match\n\n return score",
"def dist(string1, string2):\n if string1 == string2:\n return 0\n count1 = Counter(string1)\n count2 = Counter(string2)\n\n keys = set(count1.keys())\n keys.update(count2.keys())\n dist = sum(abs(count1.get(letter, 0) - count2.get(letter, 0)) for letter in keys)\n return dist",
"def scientific_match_ratio(str1, str2, keywords):\n\n # Get rid of the numbers\n str1_numberless = remove_numbers(str1)\n str2_numberless = remove_numbers(str2)\n\n # Get the keywords and whatever remains after removing the keywords\n str1_keywords, str1_remainder = get_common_words_in_description(str1_numberless, keywords)\n str2_keywords, str2_remainder = get_common_words_in_description(str2_numberless, keywords)\n\n remainder_dist = string_num_matches(str1_remainder, str2_remainder)\n common_keywords = str1_keywords.intersection(str2_keywords)\n\n common_keyword_total_len = 0\n for common_kword in common_keywords:\n common_keyword_total_len += len(common_kword)\n\n return (remainder_dist + common_keyword_total_len) * 1.0 / max(len(str1_numberless), len(str2_numberless))",
"def get_equal_rate(str1, str2):\r\n\treturn difflib.SequenceMatcher(None, str1, str2).quick_ratio()",
"def compare_str(seq1, seq2):\n if seq1 == seq2:\n return 1\n ld = Levenshtein.distance(seq1, seq2)\n longest = len(seq1 if len(seq1) > len(seq2) else seq2)\n return (longest - ld) / longest",
"def accuracy(text):\n return sum(1 for c in text if c.isupper())",
"def mm_similarity(s1, s2):\n if filter(str.isalpha, s1) == filter(str.isalpha, s2):\n if len(s1) < len(s2):\n return float(len(s1)) / len(s2)\n else:\n return float(len(s2)) / len(s1)\n else:\n return 0.",
"def string_similarity_score(left: str, right: str):\n return SequenceMatcher(None, left, right).ratio()",
"def accuracies(actual_labels, predicted_labels):\n acc = 0\n letter_acc = 0\n letter_cnt = 0\n cnt = 0\n for i in range(len(actual_labels)):\n predicted_output = predicted_labels[i]\n actual_output = actual_labels[i]\n cnt += 1\n for j in range(min(len(predicted_output), len(actual_output))):\n if predicted_output[j] == actual_output[j]:\n letter_acc += 1\n letter_cnt += max(len(predicted_output), len(actual_output))\n if actual_output == predicted_output:\n acc += 1\n final_accuracy = np.round((acc / len(actual_labels)) * 100, 2)\n final_letter_accuracy = np.round((letter_acc / letter_cnt) * 100, 2)\n return final_accuracy, final_letter_accuracy",
"def fuzzy_score_string(first_string, second_string):\n score = 0\n\n if len(first_string) < len(second_string):\n shorter, longer = (first_string, second_string)\n window_length = len(shorter)\n\n num_iterations = len(longer) - len(shorter) + 1\n\n for position in range(0, num_iterations):\n window = longer[position:position + window_length]\n l_ratio = Levenshtein.ratio(window, shorter) * 100\n\n if l_ratio > 60:\n result = statistics.mean(\n [100 - Levenshtein.distance(window, shorter) * 15, l_ratio, l_ratio])\n\n else:\n result = l_ratio\n\n if result > score:\n score = result\n\n else:\n l_ratio = Levenshtein.ratio(first_string, second_string) * 100\n score = statistics.mean(\n [100 - Levenshtein.distance(first_string, second_string) * 15, l_ratio, l_ratio])\n\n simple = fuzz.ratio(first_string, second_string)\n partial = fuzz.partial_ratio(first_string, second_string)\n sort = fuzz.token_sort_ratio(first_string, second_string)\n set_ratio = fuzz.token_set_ratio(first_string, second_string)\n\n score = max([score, simple, partial, sort, set_ratio])\n\n if score < 75:\n score = 0\n\n return score * 0.85",
"def string_f1_score(prediction, ground_truth):\n prediction_tokens = normalize_answer(prediction).split()\n ground_truth_tokens = normalize_answer(ground_truth).split()\n common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1",
"def cer(self, s1, s2):\n s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')\n return Lev.distance(s1, s2)",
"def edit_distance(str1, str2):\r\n pass",
"def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()",
"def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)",
"def get_similarity(string1, string2, probabilities, characters):\n\n strict_counter = 0\n weak_counter = 0\n for i in range(0, len(string1)):\n if string1[i] == string2[i]:\n strict_counter+=1\n\n k = 0\n for prob in probabilities[i]:\n if ((prob != 0) & (string1[i] == characters[k])):\n weak_counter += 1\n break\n \n k+=1\n\n # Return list containing strict and weak probabilities\n return [(strict_counter / len(string1)) * 100, \n (weak_counter / len(string1)) * 100]",
"def edit_distance (str1, str2):\n str1.strip()\n str2.strip()\n if len(str1) != len(str2):\n raise ValueError(\"Strings have to be of equal lengths: \" + str1 + \" and \" + str2)\n\n return sum(bit=='1' for bit in bin(int(binascii.hexlify(xorstr(str1, str2)), 16)))",
"def num_alphabet(first_val: str, second_val: str):\n el_1 = ord(first_val) - ord('a') + 1\n el_2 = ord(second_val) - ord('a') + 1\n distance = abs(el_2 - el_1 - 1)\n return f'Позиции букв: {el_1} и {el_2}. Между буквами символов: {distance} '",
"def wordSimilarityRatio(sent_1,sent_2):",
"def calculate_score(s1, s2, l1, l2, startpoint):\n matched = \"\" # to hold string displaying alignements\n score = 0\n for i in range(l2): \n if (i + startpoint) < (l1 + l2 - 1): \n if l2 - i > startpoint + 1:\n matched = matched + \".\" #dots before they start overlapping\n elif s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\" #matched bases\n score = score + 1 #adds one to score\n else:\n matched = matched + \"-\" #not matched bases\n shift, end_shift = startpoint * \".\", (l2 + l1 - startpoint - 2) * \".\"\n # dots at end, but only up until end of dots tailing l1\n # if startpoint is bigger than l1-2, end shift is less than l2 according to\n # this formula. the below check stops it from getting less than l2.\n if startpoint < l1 - 1:\n print(shift + matched + end_shift)\n else:\n print(shift + matched + (l2 - 1) * \".\")\n print(shift + s2 + end_shift)\n print(s1)\n print(str(score) + \"\\n\")\n return score, matched, shift, end_shift",
"def accuracy(self, x: Array, b: Array) -> float:\n return rel_res(self.A.gram_op(x) + self.D(x), b)",
"def compare_characters(ch1, ch2):\n fit1 = ch1.fitness\n fit2 = ch2.fitness\n if fit1 > fit2:\n return 1\n elif fit1 == fit2:\n return 0\n else:\n return -1",
"def accuracy(output1, output2):\n pred1 = output1\n pred2 = output2\n correct = torch.gt(pred1, pred2)\n return float(correct.sum())/correct.size(0)",
"def string_distance(s1, s2):\n if len(s1) != len(s2):\n return 1\n diff_count = 0\n for c1, c2, in zip(s1, s2):\n if c1 != c2:\n diff_count += 1\n return diff_count",
"def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))",
"def get_diff(text_1, text_2):\n\n return str(round(SequenceMatcher(None, text_1, text_2).ratio()*100, 2)) + '%'"
] |
[
"0.70896775",
"0.6846165",
"0.6845839",
"0.6526451",
"0.6467602",
"0.64582926",
"0.6425335",
"0.64106536",
"0.6405775",
"0.6374497",
"0.6372071",
"0.63578576",
"0.6352264",
"0.6317024",
"0.6304939",
"0.6288348",
"0.6282892",
"0.62719584",
"0.6262974",
"0.6262393",
"0.6243479",
"0.6228371",
"0.6217747",
"0.6209233",
"0.62091184",
"0.6206143",
"0.6204558",
"0.62025005",
"0.6169292",
"0.6158491"
] |
0.7735831
|
0
|
Init by loading param file and running one simulation
|
def __init__(self, name=None, params=None, params_from_file=False, params_from_user=False):
print("")
if name:
self._name = name
else:
self._name = input("Simulation Name : ")
print("Name : "+str(self._name))
self.plot_path = os.getcwd()+'/session/'+self._name+'_plots/'
        try:
            os.makedirs(self.plot_path)
        except FileExistsError:
            beep = lambda x: os.system("echo '\a';sleep 0.5;" * x)
            beep(1)
            print("WARNING : FOLDER PATH ALREADY EXISTS")
            print(self.plot_path)
            print("WRITING OVER")
            for fn in os.listdir(self.plot_path):
                os.remove(self.plot_path+fn)
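        # resolve simulation parameters: explicit dict > pickle file > interactive prompt > packaged defaults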
if params:
self.params = params
else:
if params_from_file:
self.params = load_input_pickle(params_from_file)
elif params_from_user:
self.params = get_user_params()
else:
#Define default params
self.params = load_input_pickle('default')
self.default_runs = [] # array of simulation runs with default parameters
        self.mod_runs = []    # array of tuples that contain 0) a list of simulation runs
                              # and 1) a dictionary clarifying which parameter was given
                              # which value for each run (for convenience, this can also
                              # be determined by comparing simulation_run.params directly)
print("Running Model with Default Parameters...")
self.run_default()
print("")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def init(sysargv=sys.argv):\n # The paramscript is given as commandline argument, so we have to load it dynamically.\n # NOTE: the GPU=... argument was replaced by giving params.GPU = ...\n paramsfile_default = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'params_default.py')\n myparamsfile = ([paramsfile_default] + [arg[12:] for arg in sysargv if 'paramscript=' in arg])[-1]\n params_modspec = importlib.util.spec_from_file_location(\"params\", myparamsfile)\n params = importlib.util.module_from_spec(params_modspec)\n params_modspec.loader.exec_module(params)\n sys.modules[params.__name__] = params\n globalvars.params_modspec = params_modspec\n \n polychromosims.paramproc.update_from_cmd(params)\n globalvars.params_allow_execution = True\n polychromosims.paramproc.proc(params)\n globalvars.params_module = params\n import polychromosims.sim as sim\n\n # Save relevant files with the data\n shutil.copyfile(myparamsfile, os.path.join(params.folder, 'params.py'))\n shutil.copyfile(os.path.abspath(sysargv[0]), os.path.join(params.folder, 'simscript.py'))\n\n return params, sim",
"def __init__(self,paramFile='yaml/grism_example.yaml'):\n self.pandeia_params = yaml.load(open(paramFile))\n self.prep_and_run()",
"def startSimulation(self):\n self.saveParameters()\n self.simulation.main()",
"def initialise_sim(self):\n pass",
"def __init__(self, simulator, filename=None):\n\n data = {} if filename is None else JsonUtils.read_file(filename)\n if data == {}:\n self.logger.warning(\"The config is empty. You may have a problem with your config file.\")\n # Simulation parameters\n self.name = data[\"name\"] if \"name\" in data else \"\"\n self.sim_speed = data[\"sim_speed\"] if \"sim_speed\" in data else 1.0\n self.logger_name = data[\"logger_name\"] if \"logger_name\" in data else \"INFO\"\n self.logger = logging.Logger(self.logger_name)\n self.exit_condition = data[\"exit_condition\"] if \"exit_condition\" in data else \"self.body.config.n_iter > 500\"\n self.timeout = data[\"timeout\"] if \"timeout\" in data else 10\n self.simulator = simulator\n self.t_init = 0\n self.t_end = 0\n self.n_iter = 0\n\n # Physical parameters\n self.body = data[\"body\"] if \"body\" in data else dict()\n self.legs = data[\"legs\"] if \"legs\" in data else []\n self.brain = data[\"brain\"] if \"brain\" in data else dict()\n self.connection_matrix = data[\"connection_matrix\"] if \"connection_matrix\" in data else dict()\n if self.connection_matrix == dict():\n self.config_connection_matrix()\n self.dist_ref = data[\"dist_ref\"] if \"dist_ref\" in data else 20\n self.power_ref = data[\"dist_ref\"] if \"dist_ref\" in data else 1000",
"def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')",
"def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()",
"def __init__(self, config_file, report_n, verbose, outpath=\"\"):\n self.starttime, self.log = timenow(False), \"\"\n self.logprint(\"\\nBeginning simulation {}.\".format(timenow()))\n self.logprint(\"Working directory: \"+os.getcwd())\n self.get_conf(config_file)\n self.conf.generate()\n self.report_n, self.verbose = report_n, verbose\n self.outpath = os.getcwd() if outpath==\"\" else outpath\n self.get_startpop(self.conf[\"path_to_seed_file\"])\n self.init_runs()",
"def __init__(self, cfgfile, args):\n logging.info(\"Initialising SimController\")\n # set defaults\n self.lmap = None # Ref to LogicalMap object\n self.gui = None # Ref to Gui object\n self.agent = None # Ref to Agent object\n self.gen = None # Ref to step generator\n self.current = None # current search coordinates\n self.pathcost, self.pathsteps, self.pathtime = 0, 0, 0\n self.timeremaining = float('inf')\n self.timeout = float('inf')\n\n self.path = set() # set of all coordinates displayed as part of path\n self.keptpath = None\n self.fullsearchflag = False # set to True if map is populated with extra coords\n self.coordsets = None # sets of coordinates that will need to be reset\n\n self.cfg = args # Default params as modified via CLI\n self.gotscript = False\n self.script = {} # Allows for dynamic changes\n\n # we distinguish 3 modes - config file, CLI or batch\n if cfgfile is not None:\n self.readConfig()\n self.gen = self.stepGenerator(self.cfg[\"START\"], self.cfg[\"GOAL\"])\n elif self.cfg[\"BATCH\"] is not None:\n try:\n self.runBatch(*self.cfg[\"BATCH\"])\n logging.info(\"\\nBatch process completed. Results written to \" + self.cfg[\"BATCH\"][1] + \".\\n\")\n except Exception as e:\n logging.warning(\n \"\\nAn error has occurred. Batch results may be incomplete. l\"\n \" the exception: \\n {}\".format(e))\n finally:\n raise SystemExit()\n else:\n try:\n self.setStart(ast.literal_eval(self.cfg.get(\"START\")))\n self.setGoal(ast.literal_eval(self.cfg.get(\"GOAL\")))\n\n self.initAgent()\n self.processMap() # imports map to model may return BadMap exception\n self.processPrefs() # passes heuristic and deadline preferences to model\n self.resetVars()\n\n except p4.BadAgentException:\n logging.error(\"Bad Agent. Irrecoverable error. Terminating...\")\n raise SystemExit()\n\n except p4.BadMapException:\n logging.error(\"Bad Map. Irrecoverable error. Terminating...\")\n raise SystemExit()\n\n except:\n logging.error(\"Irrecoverable error. Terminating...\")\n logging.error(\"Trace-back: \\n {}\".format(traceback.format_exc()))\n raise SystemExit()\n\n if self.cfg.get(\"GUI\"):\n self.initGui()\n else:\n self.search()",
"def loadParameters(self, parmfile=''):\n if not parmfile:\n raise IOError(\"You need to specify a parameter filename\")\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n parmpath = os.join.path(parmdir, parmfile)\n # Read from file\n with open(parmpath, 'r') as parmf:\n data = pickle.load(parmf)\n # Dictionary list\n self.modtran_visits = data[0]\n # Tuple list\n self.aerosol_visits = data[1]\n # seed value\n nruns = len(self.modtran_visits)\n print('Parameters for {1} runs computed with seed = {0}'.format(data[2],\n nruns))\n # Init transmission array\n self.initTransmissionArray(nruns)",
"def __init__(self):\n ros_ws_abspath = rospy.get_param(\"/drone/ros_ws_abspath\", None)\n assert ros_ws_abspath is not None, \"You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \\'YOUR/SIM_WS/PATH\\'\"\n assert os.path.exists(ros_ws_abspath), \"The Simulation ROS Workspace path \" + ros_ws_abspath + \\\n \" DOESNT exist, execute: mkdir -p \" + ros_ws_abspath + \\\n \"/src;cd \" + ros_ws_abspath + \";catkin_make\"\n\n ROSLauncher(rospackage_name=\"drone_construct\",\n launch_file_name=\"start_world.launch\",\n ros_ws_abspath=ros_ws_abspath)\n\n # Load Params from the desired Yaml file\n LoadYamlFileParamsTest(rospackage_name=\"openai_ros\",\n rel_path_from_package_to_file=\"src/openai_ros/task_envs/parrotdrone/config\",\n yaml_file_name=\"parrotdrone_goto.yaml\")\n\n # Only variable needed to be set here\n number_actions = rospy.get_param('/drone/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n\n # We set the reward range, which is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n\n # Actions and Observations\n self.linear_forward_speed = rospy.get_param(\n '/drone/linear_forward_speed')\n self.angular_turn_speed = rospy.get_param('/drone/angular_turn_speed')\n self.angular_speed = rospy.get_param('/drone/angular_speed')\n\n self.init_linear_speed_vector = Vector3()\n self.init_linear_speed_vector.x = rospy.get_param(\n '/drone/init_linear_speed_vector/x')\n self.init_linear_speed_vector.y = rospy.get_param(\n '/drone/init_linear_speed_vector/y')\n self.init_linear_speed_vector.z = rospy.get_param(\n '/drone/init_linear_speed_vector/z')\n\n self.init_angular_turn_speed = rospy.get_param(\n '/drone/init_angular_turn_speed')\n\n self.min_sonar_value = rospy.get_param('/drone/min_sonar_value')\n self.max_sonar_value = rospy.get_param('/drone/max_sonar_value')\n\n # Get WorkSpace Cube Dimensions\n self.work_space_x_max = rospy.get_param(\"/drone/work_space/x_max\")\n self.work_space_x_min = rospy.get_param(\"/drone/work_space/x_min\")\n self.work_space_y_max = rospy.get_param(\"/drone/work_space/y_max\")\n self.work_space_y_min = rospy.get_param(\"/drone/work_space/y_min\")\n self.work_space_z_max = rospy.get_param(\"/drone/work_space/z_max\")\n self.work_space_z_min = rospy.get_param(\"/drone/work_space/z_min\")\n\n # Maximum RPY values\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n self.max_yaw = rospy.get_param(\"/drone/max_yaw\")\n\n # Get Desired Point to Get\n self.desired_point = Point()\n self.desired_point.x = rospy.get_param(\"/drone/desired_pose/x\")\n self.desired_point.y = rospy.get_param(\"/drone/desired_pose/y\")\n self.desired_point.z = rospy.get_param(\"/drone/desired_pose/z\")\n\n self.desired_point_epsilon = rospy.get_param(\n \"/drone/desired_point_epsilon\")\n\n # We place the Maximum and minimum values of the X,Y,Z,R,P,Yof the pose\n\n high = numpy.array([self.work_space_x_max,\n self.work_space_y_max,\n self.work_space_z_max,\n self.max_roll,\n self.max_pitch,\n self.max_yaw,\n self.max_sonar_value])\n\n low = numpy.array([self.work_space_x_min,\n self.work_space_y_min,\n self.work_space_z_min,\n -1*self.max_roll,\n -1*self.max_pitch,\n -numpy.inf,\n self.min_sonar_value])\n\n self.observation_space = spaces.Box(low, high)\n\n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\" +\n str(self.observation_space))\n\n # Rewards\n 
self.closer_to_point_reward = rospy.get_param(\n \"/drone/closer_to_point_reward\")\n self.not_ending_point_reward = rospy.get_param(\n \"/drone/not_ending_point_reward\")\n self.end_episode_points = rospy.get_param(\"/drone/end_episode_points\")\n\n self.cumulated_steps = 0.0\n\n # Here we will add any init functions prior to starting the MyRobotEnv\n super(ParrotDroneGotoEnv, self).__init__(ros_ws_abspath)",
"def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")",
"def init():\n\n global MAX_ITER\n global output_dt\n global time\n global tmax\n\n # ==========================================================================\n # Arguments and parameters\n\n # Read the command-line arguments\n cline_args = parse_command_line()\n\n # Read the parameter file\n cfile_args = parse_config_file(cline_args.config_file)\n\n # Add the command-line arguments as section 'CommandLine'\n # TODO --- test this\n cfile_args['CommandLine'] = vars(cline_args)\n\n # ==========================================================================\n # Initialize the Driver component\n\n if 'Driver' not in cfile_args:\n cfile_args['Driver'] = {}\n\n # Maximum number of iterations\n if 'max_iter' in cfile_args['Driver']:\n MAX_ITER = int(cfile_args['Driver']['max_iter'])\n else:\n MAX_ITER = 10\n # Store either the default value or the value from the config file converted\n # from string to integer\n cfile_args['Driver']['max_iter'] = MAX_ITER\n\n # Time step between outputs\n if 'output_dt' in cfile_args['Driver']:\n output_dt = float(cfile_args['Driver']['output_dt'])\n else:\n output_dt = 0.0\n cfile_args['Driver']['output_dt'] = output_dt\n\n # Current time\n time = 0.0\n\n # Final time\n if 'tmax' in cfile_args['Driver']:\n tmax = float(cfile_args['Driver']['tmax'])\n else:\n raise Exception(\"tmax required\") #TODO\n\n # ==========================================================================\n # Initialize the other components\n # - Each initialization updates its own parameters dictionary (adding\n # default values for anything not specified on the command line or in the\n # configuration file) and returns the updated dictionary for logging.\n\n # Initialize the grid\n if \"Grid\" not in cfile_args:\n cfile_args['Grid'] = {}\n cfile_args['Grid'] = Grid.init(cfile_args['Grid'])\n\n # Initialize the hydro\n if \"Hydro\" not in cfile_args:\n cfile_args['Hydro'] = {}\n cfile_args['Hydro'] = Hydro.init(cfile_args['Hydro'])\n\n # ==========================================================================\n # Write parameters to log file\n\n if not os.path.exists(\"output\"):\n os.makedirs(\"output\")\n\n f = open(\"output/parameters.txt\",'w')\n for section in cfile_args:\n f.write(\"[ \" + section + \" ]\\n\")\n for item in cfile_args[section]:\n f.write(\" \" + item + \" : \" + str(cfile_args[section][item]) + \"\\n\")\n f.close()\n\n # ==========================================================================\n # Set up initial conditions\n IC.set_initial_conditions()\n\n return cfile_args",
"def load_params(self, event):\n \n self.robot_type = rospy.get_param(\"robot_type\" , 'pendulum' )\n self.robot_config = rospy.get_param(\"robot_config\", 'wrist-only' )\n self.robot_ctl = rospy.get_param(\"controller\", 'RfixCTC' )\n self.fixed_mode = rospy.get_param(\"fixed_mode\", 1 )\n \n \n ###############################################\n # Load robot model for the right configuration\n if self.robot_config == 'wrist-only':\n self.R = Proto.SingleRevoluteDSDM()\n \n elif self.robot_config == 'dual-plane' :\n self.R = Proto.TwoPlanarSerialDSDM()\n \n else:\n self.R = None\n \n ###############################################\n # Load controller\n if self.robot_ctl == 'RfixCTC' :\n self.Ctl = RminCTC.RfixComputedTorqueController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminCTC' :\n self.Ctl = RminCTC.RminComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RfixSLD' :\n self.Ctl = RminCTC.RfixSlidingModeController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminSLD' :\n self.Ctl = RminCTC.RminSlidingModeController( self.R )\n \n elif self.robot_ctl == 'RollCTC' :\n self.Ctl = RollCTC.RolloutComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl = RollCTC.RolloutSlidingModeController( self.R )\n \n else:\n self.Ctl = None\n \n \n if self.robot_config == 'wrist-only':\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 2 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0,0] ) )\n \n elif self.robot_config == 'dual-plane' :\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 4 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0.0,0.0,0.0,0.0] ) )\n #self.x_d = np.array( [-3.14 , 0 , 0 , 0] )\n \n # Gen ctl params\n self.Ctl.hysteresis = rospy.get_param(\"hysteresis\", True )\n self.Ctl.min_delay = rospy.get_param(\"min_delay\", 0.5 )\n \n self.Ctl.w0 = rospy.get_param(\"w0\", 1 )\n self.Ctl.zeta = rospy.get_param(\"zeta\", 0.7 )\n \n self.Ctl.lam = rospy.get_param(\"lam\", 1 )\n self.Ctl.nab = rospy.get_param(\"nab\", 1 )\n self.Ctl.D = rospy.get_param(\"D\", 0 )\n \n self.Ctl.horizon = rospy.get_param(\"horizon\", 0.5 )\n self.Ctl.sim_dt = rospy.get_param(\"sim_dt\", 0.1 )\n \n self.Ctl.domain_check = rospy.get_param(\"domain_check\", False )\n \n # Base policy param for roll \n if self.robot_ctl == 'RollCTC' :\n self.Ctl.FixCtl.lam = self.Ctl.lam\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl.FixCtl.lam = self.Ctl.lam \n self.Ctl.FixCtl.nab = self.Ctl.nab \n self.Ctl.FixCtl.D = self.Ctl.D",
"def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()",
"def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,\n debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):\n\n # If we are running with opencl_gui then set opencl to True, so you only need to pass one flag\n if opencl_gui:\n opencl = True\n\n # First see if we're reading a parameters file or using command-line arguments.\n if no_parameters_file:\n print(\"Not reading a parameters file\")\n else:\n print(f\"Reading parameters file: {parameters_file}. \"\n f\"Any other model-related command-line arguments are being ignored\")\n with open(parameters_file, 'r') as f:\n parameters = load(f, Loader=SafeLoader)\n sim_params = parameters[\"microsim\"] # Parameters for the dynamic microsim (python)\n calibration_params = parameters[\"microsim_calibration\"]\n disease_params = parameters[\"disease\"] # Parameters for the disease model (r)\n # TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:\n # self.params, self.params_changed = Model._init_kwargs(params, kwargs)\n # [setattr(self, key, value) for key, value in self.params.items()]\n # Utility parameters\n scenario = sim_params[\"scenario\"]\n iterations = sim_params[\"iterations\"]\n data_dir = sim_params[\"data-dir\"]\n output = sim_params[\"output\"]\n output_every_iteration = sim_params[\"output-every-iteration\"]\n debug = sim_params[\"debug\"]\n repetitions = sim_params[\"repetitions\"]\n lockdown_file = sim_params[\"lockdown-file\"]\n\n # Check the parameters are sensible\n if iterations < 1:\n raise ValueError(\"Iterations must be > 1. If you want to just initialise the model and then exit, use\"\n \"the --initialise flag\")\n if repetitions < 1:\n raise ValueError(\"Repetitions must be greater than 0\")\n if (not output) and output_every_iteration:\n raise ValueError(\"Can't choose to not output any data (output=False) but also write the data at every \"\n \"iteration (output_every_iteration=True)\")\n\n print(f\"Running model with the following parameters:\\n\"\n f\"\\tParameters file: {parameters_file}\\n\"\n f\"\\tScenario directory: {scenario}\\n\"\n f\"\\tInitialise (and then exit?): {initialise}\\n\"\n f\"\\tNumber of iterations: {iterations}\\n\"\n f\"\\tData dir: {data_dir}\\n\"\n f\"\\tOutputting results?: {output}\\n\"\n f\"\\tOutputting results at every iteration?: {output_every_iteration}\\n\"\n f\"\\tDebug mode?: {debug}\\n\"\n f\"\\tNumber of repetitions: {repetitions}\\n\"\n f\"\\tLockdown file: {lockdown_file}\\n\",\n f\"\\tUse cache?: {use_cache}\\n\",\n f\"\\tUse OpenCL version?: {opencl}\\n\",\n f\"\\tUse OpenCL GUI?: {opencl_gui}\\n\",\n f\"\\tUse OpenCL GPU for processing?: {opencl_gpu}\\n\",\n f\"\\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\\n\",\n f\"\\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\\n\")\n\n # To fix file path issues, use absolute/full path at all times\n # Pick either: get working directory (if user starts this script in place, or set working directory\n # Option A: copy current working directory:\n base_dir = os.getcwd() # get current directory\n data_dir = os.path.join(base_dir, data_dir)\n r_script_dir = os.path.join(base_dir, \"R\", \"py_int\")\n\n ### section for fetching data\n if not os.path.isdir(data_dir):\n\n print(f\"No data directory detected.\")\n\n if os.path.isfile(data_dir + \".tar.gz\"):\n print(f\"An archive file 
matching the name of the data directory has been detected!\")\n print(f\"Unpacking this archive file now.\")\n unpack_data(data_dir + \".tar.gz\")\n \n else:\n print(f\"{data_dir} does not exist. Downloading devon_data.\")\n data_setup()\n\n # Temporarily only want to use Devon MSOAs\n # devon_msoas = pd.read_csv(os.path.join(data_dir, \"devon_msoas.csv\"), header=None,\n # names=[\"x\", \"y\", \"Num\", \"Code\", \"Desc\"])\n\n # Prepare the QUANT api (for estimating school and retail destinations)\n # we only need 1 QuantRampAPI object even if we do multiple iterations\n # the quant_object object will be called by each microsim object\n quant_path = os.path.join(data_dir, \"QUANT_RAMP\")\n if not os.path.isdir(quant_path):\n raise Exception(\"QUANT directory does not exist, please check input\")\n quant_object = QuantRampAPI(quant_path)\n\n # args for population initialisation\n population_args = {\"data_dir\": data_dir, \"debug\": debug,\n \"quant_object\": quant_object}\n\n # args for Python/R Microsim. Use same arguments whether running 1 repetition or many\n msim_args = {\"data_dir\": data_dir, \"r_script_dir\": r_script_dir, \"scen_dir\": scenario, \"output\": output,\n \"output_every_iteration\": output_every_iteration}\n\n if not no_parameters_file: # When using a parameters file, include the calibration parameters\n msim_args.update(**calibration_params) # python calibration parameters are unpacked now\n # Also read the R calibration parameters (this is a separate section in the .yml file)\n if disease_params is not None:\n # (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -\n # it will be turned into an empty dictionary by the Microsim constructor)\n msim_args[\"disease_params\"] = disease_params # R parameters kept as a dictionary and unpacked later\n\n # Temporarily use dummy data for testing\n # data_dir = os.path.join(base_dir, \"dummy_data\")\n # m = Microsim(data_dir=data_dir, testing=True, output=output)\n\n # cache to hold previously calculate population data\n cache = InitialisationCache(cache_dir=os.path.join(data_dir, \"caches\"))\n\n # generate new population dataframes if we aren't using the cache, or if the cache is empty\n if not use_cache or cache.is_empty():\n print(f'Reading population data because {\"caching is disabled\" if not use_cache else \"the cache is empty\"}')\n population = PopulationInitialisation(**population_args)\n individuals = population.individuals\n activity_locations = population.activity_locations\n\n # store in cache so we can load later\n cache.store_in_cache(individuals, activity_locations)\n else: # load from cache\n print(\"Loading data from previous cache\")\n individuals, activity_locations = cache.read_from_cache()\n\n # Calculate the time-activity multiplier (this is for implementing lockdown)\n time_activity_multiplier = None\n if lockdown_file != \"\":\n print(f\"Implementing a lockdown with time activities from {lockdown_file}\")\n time_activity_multiplier: pd.DataFrame = \\\n PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))\n\n # Select which model implementation to run\n if opencl:\n run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,\n opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)\n else:\n # If -init flag set the don't run the model. 
Note for the opencl model this check needs to happen\n # after the snapshots have been created in run_opencl_model\n if initialise:\n print(\"Have finished initialising model. -init flag is set so not running it. Exitting\")\n return\n run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,\n repetitions, parameters_file)",
"def init_run(self):\n raise NotImplementedError",
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)",
"def initialize():\n\n parser = build_arg_parser()\n par = parser.parse_known_args()[0]\n\n # Main arguments.\n set('run_mode', par.run_mode)\n set('input_files', par.image)\n\n # Sub-parser specific arguments.\n if par.run_mode == 'train':\n\n set('batch_size', par.batch_size)\n set('drop', par.drop)\n set('epochs', par.epochs)\n set('model', par.model)\n set('level', par.level)\n set('vfrac', par.vfrac)\n set('data_augm', par.data_augm)\n set('summary', par.summary)\n set('outdir', par.outdir)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n set('discriminator', par.discriminator)\n\n elif par.run_mode == 'predict':\n\n set('tile_edge', par.edge)\n set('model', par.model)\n set('save_conv2d_kernels', par.save_conv2d_kernels) \n set('save_conv2d_outputs', par.save_conv2d_outputs) \n set('colormap', par.colormap)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n\n elif par.run_mode == 'diagnose': \n \n set('model', par.model) \n \n else:\n \n pass",
"def _setup_simulation(self\n ) -> None:\n pass",
"def Init(ss):\n rand.Seed(ss.RndSeed)\n ss.UpdateEnv()\n ss.StopNow = False\n ss.SetParams(\"\", False)\n ss.NewRun()\n ss.UpdateView(True)",
"def initialize(filename='params.yaml'):\n home_path = str(Path.home())\n project_path = 'Documents/SideProjects/sailboatsfactory'\n work_path = 'src/nn-core'\n params_path = join(home_path, join(project_path, work_path))\n yaml_file = join(params_path, filename)\n print(\"Reading parameters from:\", filename)\n with open(yaml_file, 'r') as f:\n my_params = load(f)\n my_params['x_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n my_params['y_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n\n raw = data.read(my_params)\n adjusted = adjust(raw, my_params)\n\n return adjusted, my_params",
"def init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))",
"def __init__(self, model_filename, sim_filename, include_paths = None):\n\n self.model_filename = model_filename\n self.sim_filename = sim_filename\n self.include_paths = include_paths\n \n self.simulation = None\n self.fit_input = None",
"def init_params():\n p = {}\n \n # p['rootFolder'] = 'C:/Users/Umberto Gostoli/SPHSU/Social Care Model II'\n # p['rootFolder'] = 'N:/Social Care Model Paper III'\n \n p['noPolicySim'] = False\n p['multiprocessing'] = True\n p['numberProcessors'] = 9\n p['numRepeats'] = 3\n \n p['startYear'] = 1860\n p['endYear'] = 2040\n p['thePresent'] = 2012\n p['statsCollectFrom'] = 1990\n p['regressionCollectFrom'] = 1960 \n p['implementPoliciesFromYear'] = 2020\n p['yearOutcome'] = 2015\n \n p['favouriteSeed'] = 123\n p['loadFromFile'] = False\n p['verboseDebugging'] = False\n p['singleRunGraphs'] = False\n p['saveChecks'] = True\n p['getCheckVariablesAtYear'] = 2015\n # To change through command-line arguments\n\n p['numberPolicyParameters'] = 2\n p['valuesPerParam'] = 1\n p['numberScenarios'] = 3\n \n ############ Policy Parameters #######################\n p['incomeCareParam'] = 0.0005 #[0.00025 - 0.001]\n p['taxBreakRate'] = 0.0\n p['ageOfRetirement'] = 65\n p['socialSupportLevel'] = 5\n # p['educationCosts']\n #############################################################\n p['socialCareCreditShare'] = 0.0\n p['maxWtWChildAge'] = 5\n # The basics: starting population and year, etc.\n \n p['discountingFactor'] = 0.03\n \n \n p['initialPop'] = 600 \n \n p['minStartAge'] = 24\n p['maxStartAge'] = 45\n p['numberClasses'] = 5\n p['socialClasses'] = ['unskilled', 'skilled', 'lower', 'middle', 'upper']\n p['initialClassShares'] = [0.2, 0.25, 0.3, 0.2, 0.05]\n p['initialUnemployment'] = [0.25, 0.2, 0.15, 0.1, 0.1]\n p['unemploymentAgeBandParam'] = 0.3\n \n # doDeath function parameters\n p['mortalityBias'] = 0.85 # After 1950\n p['careNeedBias'] = 0.9\n p['unmetCareNeedBias'] = 0.5\n p['baseDieProb'] = 0.0001\n p['babyDieProb'] = 0.005\n p['maleAgeScaling'] = 14.0\n p['maleAgeDieProb'] = 0.00021\n p['femaleAgeScaling'] = 15.5\n p['femaleAgeDieProb'] = 0.00019\n \n p['orphansRelocationParam'] = 0.5\n \n # doBirths function parameters\n p['minPregnancyAge'] = 17\n p['maxPregnancyAge'] = 42\n p['growingPopBirthProb'] = 0.215\n p['fertilityCorrector'] = 1.0\n p['fertilityBias'] = 0.9\n \n # careTransitions function parameters\n p['zeroYearCare'] = 80.0\n p['childcareDecreaseRate'] = 0.25\n p['personCareProb'] = 0.0008\n p['maleAgeCareScaling'] = 18.0 # p['maleAgeCareProb'] = 0.0008\n p['femaleAgeCareScaling'] = 19.0 # p['femaleAgeCareProb'] = 0.0008\n p['baseCareProb'] = 0.0002\n p['careBias'] = 0.9\n p['careTransitionRate'] = 0.7\n\n p['unmetNeedExponent'] = 1.0 # 0.005 #[0.005 - 0.02]\n \n p['numCareLevels'] = 5\n p['careLevelNames'] = ['none','low','moderate','substantial','critical']\n p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 32.0, 80.0 ]\n p['quantumCare'] = 4.0\n \n # careSupplies getCare and probSuppliers function parameters\n \n ######## Key parameter 1 ##############\n \n \n p['weeklyHours'] = 40.0\n \n \n p['priceChildCare'] = 0.76 # 6 \n p['schoolAge'] = 5\n p['maxFormalChildcareHours'] = 48\n p['schoolHours'] = 30\n p['freeChildcareHours'] = 15\n p['workingParentsFreeChildcareHours'] = 30\n p['minAgeStartChildCareSupport'] = 3\n p['minAgeStartChildCareSupportByIncome'] = 2\n p['maxHouseholdIncomeChildCareSupport'] = 40 # 320\n \n ######## Key parameter 2 ##############\n # 5: No public supply \n \n p['retiredHours'] = [48.0, 36.0, 20.0, 10.0] # 60.0\n p['studentHours'] = [24.0, 16.0, 8.0, 4.0]\n p['teenAgersHours'] = [16.0, 0.0, 0.0, 0.0]\n p['unemployedHours'] = [32.0, 24.0, 16.0, 8.0]\n p['employedHours'] = [28.0, 20.0, 12.0, 8.0]\n p['formalCareDiscountFactor'] = 0.5\n \n 
p['socialNetworkDistances'] = [0.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0]\n p['networkDistanceParam'] = 2.0\n p['socialCareWeightBias'] = 1.0\n p['unmetCareNeedDiscountParam'] = 0.5\n p['shareUnmetNeedDiscountParam'] = 0.5\n # p['pastShareUnmetNeedWeight'] = 0.5\n \n \n \n p['networkSizeParam'] = 10.0 # 1.0\n \n p['careSupplyBias'] = 0.5\n p['careIncomeParam'] = 0.001\n \n # Hospitalization Costs\n p['qalyBeta'] = 0.18\n p['qalyAlpha'] = 1.5\n p['qalyDiscountRate'] = 0.035\n p['qalyIndexes'] = [1.0, 0.8, 0.6, 0.4, 0.2]\n p['unmetCareHealthParam'] = 0.1\n p['hospitalizationParam'] = 0.5\n p['needLevelParam'] = 2.0\n p['unmetSocialCareParam'] = 2.0\n p['costHospitalizationPerDay'] = 400\n \n # ageTransitions, enterWorkForce and marketWage functions parameters\n p['ageTeenagers'] = 12\n p['minWorkingAge'] = 16\n \n ######## Key parameter 3 ##############\n \n p['careBankingSchemeOn'] = False\n p['socialCareBankingAge'] = 65\n \n p['absoluteCreditQuantity'] = False\n p['quantityYearlyIncrease'] = 0.0\n p['socialCareCreditQuantity'] = 0\n p['kinshipNetworkCarePropension'] = 0.5\n p['volunteersCarePropensionCoefficient'] = 0.01\n p['pensionContributionRate'] = 0.05\n \n p['hillHealthLevelThreshold'] = 3\n p['seriouslyHillSupportRate'] = 0.5\n \n ### Prices ####\n p['pricePublicSocialCare'] = 20.0 # [2.55] # 20\n p['priceSocialCare'] = 17.0 # [2.29] # 18\n p['taxBrackets'] = [663, 228, 0] # [28.16, 110.23] # [221, 865]\n p['taxBandsNumber'] = 3\n p['bandsTaxationRates'] = [0.4, 0.2, 0.0] # [0.0, 0.2, 0.4]\n # Tax Break Policy\n\n \n p['pensionWage'] = [5.0, 7.0, 10.0, 13.0, 18.0] # [0.64, 0.89, 1.27, 1.66, 2.29] # \n p['incomeInitialLevels'] = [5.0, 7.0, 9.0, 11.0, 14.0] #[0.64, 0.89, 1.15, 1.40, 1.78] # \n p['incomeFinalLevels'] = [10.0, 15.0, 22.0, 33.0, 50.0] #[1.27, 1.91, 2.80, 4.21, 6.37] # \n p['educationCosts'] = [0.0, 100.0, 150.0, 200.0] #[0.0, 12.74, 19.12, 25.49] # \n \n # Priced growth #####\n p['wageGrowthRate'] = 1.0 # 1.01338 # \n\n p['incomeGrowthRate'] = [0.4, 0.35, 0.35, 0.3, 0.25]\n \n # SES inter-generational mobility parameters\n p['leaveHomeStudentsProb'] = 0.5\n \n p['eduWageSensitivity'] = 0.2 # 0.5\n p['eduRankSensitivity'] = 3.0 # 5.0\n p['costantIncomeParam'] = 80.0 # 20.0\n p['costantEduParam'] = 10.0 # 10.0\n p['careEducationParam'] = 0.005 # 0.04\n \n \n \n # p['incEduExp'] = 0.25\n p['educationLevels'] = ['GCSE', 'A-Level', 'HND', 'Degree', 'Higher Degree']\n p['workingAge'] = [16, 18, 20, 22, 24]\n \n # doDivorce function parameters\n p['basicDivorceRate'] = 0.06\n p['variableDivorce'] = 0.06\n p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n p['divorceBias'] = 1.0\n \n # doMarriages function parameters\n p['deltageProb'] = [0.0, 0.1, 0.25, 0.4, 0.2, 0.05]\n p['incomeMarriageParam'] = 0.025\n p['studentFactorParam'] = 0.5\n ######## Key parameter 4 ##############\n p['betaGeoExp'] = 2.0 #[1.0 - 4.0]\n \n p['betaSocExp'] = 2.0\n p['rankGenderBias'] = 0.5\n p['basicMaleMarriageProb'] = 0.9\n p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0 ]\n \n # jobMarket, updateWork and unemploymentRate functions parameters\n p['unemploymentClassBias'] = 0.75\n p['unemploymentAgeBias'] = [1.0, 0.55, 0.35, 0.25, 0.2, 0.2]\n p['numberAgeBands'] = 6\n p['jobMobilitySlope'] = 0.004\n p['jobMobilityIntercept'] = 0.05\n p['ageBiasParam'] = [7.0, 3.0, 1.0, 0.5, 0.35, 0.15]\n p['deltaIncomeExp'] = 0.05\n p['unemployedCareBurdernParam'] 
= 0.025\n # Potential key parameter\n p['relocationCareLossExp'] = 1.0 # 40.0 # \n p['incomeSocialCostRelativeWeight'] = 0.5\n \n p['firingParam'] = 0.2\n p['wageVar'] = 0.06\n p['workDiscountingTime'] = 0.75 # 0.8\n p['sizeWeightParam'] = 0.7\n p['minClassWeightParam'] = 1.0\n p['incomeDiscountingExponent'] = 4.0\n p['discountingMultiplier'] = 2.0\n #p['incomeDiscountingParam'] = 2.0\n \n # relocationPensioners function parameters\n p['agingParentsMoveInWithKids'] = 0.1\n p['variableMoveBack'] = 0.1\n p['retiredRelocationParam'] = 0.001 # 0.005\n \n # houseMap function parameters\n p['geoDistanceSensitivityParam'] = 2.0\n p['socDistanceSensitivityParam'] = 2.0\n p['classAffinityWeight'] = 4.0\n p['distanceSensitivityParam'] = 0.5\n \n # relocationProb function parameters\n p['baseRelocatingProb'] = 0.05\n p['relocationParameter'] = 1.0 \n p['apprenticesRelocationProb'] = 0.5\n #p['expReloc'] = 1.0\n \n # computeRelocationCost and relocation Propensity functions parameters\n p['yearsInTownSensitivityParam'] = 0.5\n \n ######## Key parameter 5 ##############\n p['relocationCostParam'] = 0.5 # 1.0 \n \n ######## Key parameter 6 ##############\n p['propensityRelocationParam'] = 2.0 # 2.0 \n p['denRelocationWeight'] = 0.5\n \n \n ## Description of the map, towns, and houses\n p['mapGridXDimension'] = 8\n p['mapGridYDimension'] = 12 \n p['townGridDimension'] = 70\n p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]\n p['ukMap'] = [[ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],\n [ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],\n [ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ]]\n p['ukClassBias'] = [[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]\n p['mapDensityModifier'] = 0.6\n # p['numHouseClasses'] = 3\n # p['houseClasses'] = ['small','medium','large']\n \n ## Graphical interface details\n p['interactiveGraphics'] = False #True\n p['delayTime'] = 0.0\n p['screenWidth'] = 1300\n p['screenHeight'] = 700\n p['bgColour'] = 'black'\n p['mainFont'] = 'Helvetica 18'\n p['fontColour'] = 'white'\n p['dateX'] = 70\n p['dateY'] = 20\n p['popX'] = 70\n p['popY'] = 50\n p['pixelsInPopPyramid'] = 2000\n p['num5YearAgeClasses'] = 28\n p['careLevelColour'] = ['blue','green','yellow','orange','red']\n p['houseSizeColour'] = ['brown','purple','yellow']\n p['pixelsPerTown'] = 56\n p['maxTextUpdateList'] = 22\n \n # p['eduEduSensitivity'] = 0.5\n # p['mortalityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['fertilityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['divorceBias'] = [2.0, 1.5, 1.0, 0.75, 0.5]\n\n ## Transitions to care statistics\n \n ## Availability of care statistics\n \n 
#p['childHours'] = 5.0\n # p['employedHours'] = 12.0\n #p['homeAdultHours'] = 30.0\n #p['workingAdultHours'] = 25.0\n #p['maxEmployedHours'] = 60.0\n \n #p['lowCareHandicap'] = 0.5\n #p['hourlyCostOfCare'] = 20.0\n \n ## Fertility statistics\n \n # p['steadyPopBirthProb'] = 0.13\n # p['transitionYear'] = 1965\n \n ## Class and employment statistics\n # p['numClasses'] = 5\n # p['occupationClasses'] = ['lower','intermediate','higher']\n # p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]\n\n ## Age transition statistics\n # p['ageOfAdulthood'] = 17\n \n ## Marriage function parameters\n \n # p['basicFemaleMarriageProb'] = 0.25\n # p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['femaleMarriageProb'] = [0.01, 0.15, 0.3, 0.2, 0.1, 0.1, 0.06, 0.05, 0.02, 0.01, 0.01, 0.005]\n # p['maleMarriageProb'] = [0.005, 0.08, 0.25, 0.25, 0.15, 0.1, 0.07, 0.05, 0.03, 0.02, 0.01, 0.005]\n \n ## Leaving home and moving around statistics\n # p['probApartWillMoveTogether'] = 0.3\n # p['coupleMovesToExistingHousehold'] = 0.3\n # p['basicProbAdultMoveOut'] = 0.22\n # p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbSingleMove'] = 0.05\n # p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbFamilyMove'] = 0.03\n # p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]\n\n \n return p",
"def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()",
"def Initialize(self):\n problem_data = self.project_parameters[\"problem_data\"]\n if problem_data.Has(\"start_time\"):\n warn_msg = 'Parameter TIME is used as load factor. \\n'\n warn_msg += 'Parameter \"start_time\" will be ignored!'\n KratosMultiphysics.Logger.PrintWarning(\"StructuralMechanicsPrebucklingAnalysis; Warning\", warn_msg)\n else:\n # Create dummy parameter\n aux_settings = KratosMultiphysics.Parameters(r\"\"\"{ \"start_time\" : 1.0 }\"\"\")\n problem_data.AddMissingParameters(aux_settings)\n\n if problem_data.Has(\"end_time\"):\n warn_msg = 'Parameter TIME is used as load factor. \\n'\n warn_msg += 'Parameter \"end_time\" will be ignored!'\n KratosMultiphysics.Logger.PrintWarning(\"StructuralMechanicsPrebucklingAnalysis; Warning\", warn_msg)\n else:\n # Create dummy paramter\n aux_settings = KratosMultiphysics.Parameters(r\"\"\"{ \"end_time\" : 1.0 }\"\"\")\n problem_data.AddMissingParameters(aux_settings)\n\n # Initialize super class\n super().Initialize()\n\n # Initialize solution stepping\n self.step = 0\n self.time = 1\n if not problem_data.Has(\"nsteps\"):\n raise Exception(\"StructuralMechanicsPrebucklingAnalysis: \" + 'Maximum number of steps \"nsteps\" must be provided\"!')\n else:\n self.nsteps = problem_data[\"nsteps\"].GetInt()\n\n ## If the echo level is high enough, print the complete list of settings used to run the simualtion\n if self.echo_level > 1:\n with open(\"ProjectParametersOutput.json\", 'w') as parameter_output_file:\n parameter_output_file.write(self.project_parameters.PrettyPrintJsonString())\n\n KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(), \"Analysis -START- \")",
"def start(self):\n print(\"Commands:\")\n print(\"1 - Default run: n=4, noEpoch=100 and noAnts=3\")\n print(\"2 - You choose these parameters :)\")\n print(\"3 - Statistics\")\n cmd = input(\"Enter command: \")\n if cmd == \"2\":\n n = int(input(\"Give n:\"))\n noEpoch = int(input(\"Give nr epoch:\"))\n noAnts = int(input(\"Give nr ants:\"))\n problem = Problem(\"specific_params.txt\")\n ctrl = Controller(noAnts,n,noEpoch,problem)\n ctrl.runAlg()\n \n if cmd==\"1\":\n problem = Problem(\"specific_params.txt\")\n ctrl = Controller(3,4,100,problem)\n ctrl.runAlg()\n \n if cmd == \"3\":\n s = Statistics()\n s.statistics(4)",
"def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()",
"def _initialise_run(self) -> None:"
] |
[
"0.74145514",
"0.7046797",
"0.68025666",
"0.6763831",
"0.675749",
"0.66793156",
"0.6660954",
"0.6620679",
"0.6596364",
"0.65661407",
"0.65359855",
"0.6486038",
"0.6465894",
"0.6434889",
"0.64232063",
"0.6396819",
"0.6377265",
"0.635895",
"0.63383824",
"0.6292895",
"0.6278178",
"0.6273077",
"0.6261684",
"0.6249959",
"0.62463903",
"0.6245399",
"0.62306255",
"0.621868",
"0.62013936",
"0.619607"
] |
0.71622616
|
1
|
Print out name, current params and stored simulation runs
|
def __str__(self):
print("")
s = "NAME : "+self._name+"\n\n"
s += "PARAMS :"
print(s)
for key, val in self.params.items():
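            # pad each key with tab characters so the values print in a rough column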
            n_tabs = (21 - len(key)) // 7
            print("{0}".format(key) + "\t"*n_tabs + ":\t{0}".format(val))
s = "\nRuns stored in DEFAULT_RUNS = "+str(len(self.default_runs))
print(s)
s = "\nRuns stored in MOD_RUNS = "+str(len(self.mod_runs))
print(s)
return ""
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def display_sim_parameters(self):\n pprint.pprint(vars(self))\n return",
"def print_params(self):\n\n logger.info('SimulatedMaps has been initialised with the following attributes:')\n for key in self.params.keys():\n logger.info('{} = {}'.format(key, self.params[key]))",
"def display(self):\n print(\"{}, {}\".format(self.label, self.params))",
"def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"",
"def print_str(self):\n print('*StanGpMatern with params={}'.format(self.params))",
"def print_me(self):\n\n print(\"----- Model:\",self.name,\" -----\")\n print(\"Mass (in M_sun): %.5f\" % (self.glb[imass]/constants.solar_mass))\n print(\"Radius (in R_sun): %.5f\" % (self.glb[iradius]/constants.solar_radius))\n print(\"Reference frequency (in uHz): %.3f\" % self.glb[ifreq_ref])\n print(\"Temperature (in K): %.1f\" % self.glb[itemperature])\n print(\"Luminosity (in L_sun): %.3g\" % (self.glb[iluminosity]/constants.solar_luminosity))\n print(\"Age (in Myrs): %.2f\" % self.glb[iage])\n print(\"Z: %.4f\" % self.glb[iz0])\n print(\"X: %.4f\" % self.glb[ix0])\n for (name, latex_name) in config.user_params:\n print(\"{0:29} {1:.5e}\".format(name,self.glb[user_params_index[name]]))\n print(\"Modes (in muHz):\")\n size = self.modes.shape[0]\n for i in range(size):\n print(\" (n,l,freq,IK) = (%d, %d, %.15f, %.5e)\" % \\\n (self.modes['n'][i], self.modes['l'][i], \\\n self.modes['freq'][i]*self.glb[ifreq_ref],\\\n self.modes['inertia'][i]))",
"def view(self,\n print_global_settings=True,\n print_general_settings=True,\n print_tmp_vals=False,\n print_results=True,\n **kws\n ):\n\n print(self.name)\n\n if print_global_settings:\n print(\"Global settings:\")\n pprint.pprint(self.global_settings)\n print()\n\n if print_general_settings:\n print(\"General settings:\")\n pprint.pprint(self.settings[self.name]['General'])\n print()\n\n for i, x in enumerate(self.routine_template):\n print(f\"Step {i}, {x[0].__name__} ({x[1]})\")\n print(\"Settings:\")\n pprint.pprint(x[2], indent=4)\n\n if print_tmp_vals:\n try:\n print(\"Temporary values:\")\n pprint.pprint(x[3], indent=4)\n except IndexError:\n pass\n print()\n\n if print_results:\n print_step_results(self)",
"def print_data(self):\n for chain, gen in self.generations.items():\n print('Generations for chain %s: %d' % (chain, gen))\n print('Log likelihood effective size: %d' % self.loglik_effsize)\n print('Log likelihood relative difference: %f' % self.loglik_rel_diff)\n print('Max diff: %f' % self.max_diff)",
"def print_state():\n global simulator\n if simulator is None:\n print \"program is not started\"\n return\n print simulator.state()",
"def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)",
"def print_results(self):\n pass",
"def print_configuration_info():\n print(\"Selected dataset:\", DATASET) \n print(\"Dataset base directory:\", BASE_INPUT_DIR) \n print(\"Daytime option:\", DAYTIME) \n print(\"Nones option:\", NONES) \n print(\"Selected action/activity representation:\", OP)\n print(\"Number of epochs: \", EPOCHS)\n print(\"Number of folds for cross-validation: \", FOLDS)\n print(\"Input directory for data files:\", INPUT_DIR) \n print(\"Embedding matrix file:\", EMBEDDING_WEIGHTS)\n print(\"Action sequences (X) file:\", X_FILE) \n print(\"Word embedding file for activities:\", ACTIVITY_EMBEDDINGS) \n print(\"Activity to int mappings:\", ACTIVITY_TO_INT)\n print(\"Int to activity mappings:\", INT_TO_ACTIVITY) \n print(\"Experiment ID:\", EXPERIMENT_ID)\n print(\"Treat imbalance data:\", TREAT_IMBALANCE)\n print(\"Save intermediate plots:\", SAVE)\n print(\"Batch size:\", BATCH_SIZE)\n print(\"Dropout:\", DROPOUT)\n print(\"Loss:\", LOSS)",
"def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())",
"def printParameters(self):\n with self._graph.as_default():\n for var in tf.global_variables():\n print(var.name)\n val = self._sess.run(var)\n print(val)",
"def run(self):\n self._display_sims(self._compute_sims())",
"def print_env_information(step_id, current_time, final_move, current_score, current_reward):\n print(\"Step: {}\".format(step_id))\n print(\"Current Time: {}\".format(current_time))\n print(\"Action: {}\".format(final_move))\n print(\"Current scenario score: {} \\nCurrent reward: {}\\n\".format(current_score, current_reward))",
"def printing_vars(self):\n print(\"Name is \", self.name)",
"def print_results(self, data: SimData) -> None:\n pass",
"def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=4,\n task=4,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.01,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"fat-tail\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(\n params[key],\n actual_sim_parameters[key],\n msg=\"Error in {}: {}!={}\".format(key, params[key], actual_sim_parameters[key]),\n )\n self.assertEqual(self.tree.get_job()[0], 4, msg=\"Job number not stored correctly.\")\n self.assertEqual(self.tree.get_job()[1], 4, msg=\"Job number not stored correctly.\")",
"def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")",
"def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=1,\n task=29,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"none\",\n coarse_map_x=13,\n coarse_map_y=13,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"tiled_fine\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 1)\n self.assertEqual(self.tree.get_job()[1], 29)",
"def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=3,\n task=3,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"none\",\n coarse_map_x=13,\n coarse_map_y=13,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 3)\n self.assertEqual(self.tree.get_job()[1], 3)",
"def inspect_state(self):\n for name in self._param_store.get_all_param_names():\n self._logger.info(\"Param [%s]: %r\", name,\n pyro.param(name).data.numpy())",
"def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))",
"def performSimulation(self):\n \n if self.parameters['verbose']:\n print(\"=====================\\nStarting simulation with parameters\\n\",self.parameters)\n print(\"=====================\\nInitial Graph\\n\")\n self.showState()\n print(\"=====================\")\n\n while self.parameters['steps'] > 0:\n if self.parameters['verbose']: print(\"Performing step\")\n self.performStep()\n if self.parameters['verbose']: self.showState()\n\n if self.parameters['verbose']:\n print(\"=====================\\nFinished Simulation\\n\\nResult graph:\")\n self.showState()\n #self.showGraph(self.parameters['file_name'])\n #self.showState()\n #self.showStats()",
"def run_info(self):\n return \"MPI: %d, OMP: %d\" % (self.mpi_procs, self.omp_threads)",
"def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=1,\n task=30,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"tiled_coarse\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 1)\n self.assertEqual(self.tree.get_job()[1], 30)",
"def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=2,\n task=2,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=1.0,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"null\",\n coarse_map_x=20,\n coarse_map_y=20,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"null\",\n fine_map_x=10,\n fine_map_y=10,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=10,\n grid_y=10,\n sample_x=10,\n sample_y=10,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=1.0,\n cutoff=0.0,\n landscape_type=\"infinite\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key], msg=\"Error in {}\".format(key))\n self.assertEqual(self.tree.get_job()[0], 2)\n self.assertEqual(self.tree.get_job()[1], 2)",
"def testSimParamsStored(self):\n params = self.tree.get_simulation_parameters()\n actual_sim_parameters = dict(\n seed=1,\n task=1,\n output_dir=\"output\",\n speciation_rate=0.1,\n sigma=4.0,\n tau=4.0,\n deme=1,\n sample_size=1.0,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=0.0,\n time_config_file=\"null\",\n coarse_map_file=\"null\",\n coarse_map_x=20,\n coarse_map_y=20,\n coarse_map_x_offset=0,\n coarse_map_y_offset=0,\n coarse_map_scale=1.0,\n fine_map_file=\"null\",\n fine_map_x=10,\n fine_map_y=10,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"null\",\n grid_x=10,\n grid_y=10,\n sample_x=10,\n sample_y=10,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"fat-tail\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"infinite\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in params.keys():\n self.assertEqual(params[key], actual_sim_parameters[key])\n self.assertEqual(self.tree.get_job()[0], 1)\n self.assertEqual(self.tree.get_job()[1], 1)",
"def display_parameters(self):\n ips = GAConfig[\"initial_population_size\"]\n ps = GAConfig[\"population_size\"]\n nomp = GAConfig[\"num_mating_pairs\"]\n mf = GAConfig[\"base_mutation_factor\"]\n ne = GAConfig[\"num_evolutions\"]\n noc = GAConfig[\"num_categories\"]\n nog = len(self.phones)\n\n display_string = \"\"\"\n Genetic Algorithm Parameters\n ----------------------------\n Initial Population Size %s\n Population Size %s\n Number of Mating Pairs %s\n Base Mutation Factor %s\n Number of Evolutions %s\n Number of Categories %s\n Number of Genes %s\n \"\"\" % (ips, ps, nomp, mf, ne, noc, nog)\n\n print(display_string)"
] |
[
"0.66638035",
"0.6654243",
"0.65848184",
"0.64983094",
"0.63729537",
"0.6371522",
"0.63300407",
"0.628227",
"0.6249049",
"0.6231643",
"0.6209212",
"0.62031406",
"0.619824",
"0.61875284",
"0.61813885",
"0.61715716",
"0.6169657",
"0.6163123",
"0.61437184",
"0.61434895",
"0.613427",
"0.6116312",
"0.61107606",
"0.6109901",
"0.60974634",
"0.60732406",
"0.60724354",
"0.60658634",
"0.60602933",
"0.60451615"
] |
0.72447944
|
0
|
Run a batch of simulations, modulating one parameter each time. synapse_distr should be a tuple from i_o.get_synapse_range(). The output is stored as a tuple containing the list of simulation run objects and mod_range.
|
def run_modulation(self, parameter="cav_p_open", mod_range=[(x+1)/20 for x in range(20)], synapse_distr=False):
    """Run one simulation per value in mod_range, scaling `parameter` by that value.

    synapse_distr should be a tuple from i_o.get_synapse_range(); the output is
    appended to self.mod_runs as (sim_runs, {parameter: mod_range}) and returned.
    """
    sim_runs = []
    print("Running Modulation of "+parameter+" for range:")
    print(mod_range)
    print("")
    for x in range(len(mod_range)):
        print("Run #"+str(x+1)+" with "+parameter+" at "+str(mod_range[x])+" of default")
        # copy the default parameters and scale the chosen one for this run
        alt_params = self.params.copy()
        alt_params[parameter] *= mod_range[x]
        sim_runs.append(self._runModel(params=alt_params, nonuniform_parameter=synapse_distr))
        print("")
    print("----")
    print("Storing sim_runs in mod_runs as\n(sim_runs, mod_dict)\nwhere mod_dict = {parameter:mod_range}\n")
    self.mod_runs.append((sim_runs, {parameter: mod_range}))
    print("Done with Modulation of "+parameter)
    print("----")
    return (sim_runs, {parameter: mod_range})
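
For context, a minimal usage sketch follows; `sim` is a hypothetical instance of whatever class defines run_modulation above, and the parameter name is simply the method's own default:

# Illustrative only -- `sim` stands in for the object that defines run_modulation.
sim_runs, mod_dict = sim.run_modulation(
    parameter="cav_p_open",          # key in sim.params to scale
    mod_range=[0.5, 1.0, 1.5, 2.0],  # multiples of the default value
)
# mod_dict == {"cav_p_open": [0.5, 1.0, 1.5, 2.0]}; sim_runs holds one run object per value.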
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)",
"def run_sim(mass, start, stop, sampling_rate):\n axion = Axion(mass=mass)\n return axion.do_fast_axion_sim(start,\n stop,\n sampling_rate)",
"def make_simulations(self):\n pass",
"def runSim(self):\n if self.verbose:\n print(\"Running Simulation, This may take a while\")\n self.makeXData(float(self.pretime))\n pool = Pool(processes=len(self.powers))\n jobs = []\n self.gem_pair = []\n self.electron = []\n self.hole = []\n self.filled = []\n self.signal = []\n self.gsignal = []\n self.ehsignal = []\n self.gloss = []\n self.tloss = []\n self.qk = []\n for power, pulse in zip(self.powers, self.pulses):\n inputs = [power, pulse, self.steps, self.trap, self.tolerance,\n self.EHdecay, self.Etrap, self.FHloss, self.Gdecay,\n self.G2decay, self.G3decay, self.GHdecay, self.Gescape,\n self.Gform, self.G3loss, self.Keq, self.trackQ,\n self.verbose]\n jobs.append(pool.apply_async(powerRun, inputs))\n for job in jobs:\n gem_pair, electron, hole, filled, signal, gsignal, ehsignal, gloss, tloss, qk = job.get()\n self.signal.append(signal * self.scalar / self.step)\n self.gsignal.append(gsignal * self.scalar / self.step)\n self.ehsignal.append(ehsignal * self.scalar / self.step)\n self.gloss.append(gloss * self.scalar / self.step)\n self.tloss.append(tloss * self.scalar / self.step)\n self.gem_pair.append(gem_pair)\n self.electron.append(electron)\n self.hole.append(hole)\n self.filled.append(filled)\n self.qk.append(qk)\n pool.close()",
"def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)",
"def run_simulation(self, number_runs = 1):\n for i in range(0, number_runs):\n self.ques = [self.start for i in range(0, self.numQueues)]\n run = self.__single_sim_results()\n run_results = pd.DataFrame({'simulation':i,\n 'num_items': len(run),\n 'wait_count': len(run[run['wait_time']>datetime.timedelta(seconds=0)]),\n 'avg_wait_time': run.wait_time.mean(),\n 'close_time': max(run['appt_end_time'])}, index=[i])\n self.results = pd.concat([self.results, run_results], ignore_index=True)\n self.results['last_appt_to_close_minutes'] = (self.results['close_time']-self.end).dt.total_seconds().div(60)\n return",
"def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )",
"def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir \"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n #\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)",
"def simulate(self, **args):\n snr = ct.c_double * 3\n self.sim_params = {**self.sim_params, **args}\n snr = snr(*self.sim_params[\"snr\"])\n dec_param = decoder_param(self.sim_params[\"earlyTerm\"], self.sim_params[\"iterations\"], self.sim_params[\"decoding\"].encode(\"utf-8\"))\n ch_param = channel_param(self.sim_params[\"seed\"], snr, self.sim_params[\"channel\"].encode(\"utf-8\"))\n sim_param = simulation_param(self.sim_params[\"threads\"], self.sim_params[\"maxFrames\"], self.sim_params[\"fec\"], \"\".encode(\"utf-8\"))\n\n def sim_thread():\n self.sim_stop_flag.value = False\n\n self.lib.argtypes = (decoder_param, channel_param, simulation_param, sim_results_t, ct.c_bool)\n self.lib.simulate(\n dec_param,\n ch_param,\n sim_param, \n ct.byref(self.sim_results_struct),\n ct.byref(self.sim_stop_flag)\n )\n \n th_sim = threading.Thread(target=sim_thread)\n th_sim.start()",
"def simulation_run_v4(num_demand_nodes,num_nurses,time_horizon, locations, fixed_service_time=True, shifts=False, restrictions=False):\n\n # generate demand nodes, customers, and nurses\n node = True\n node_type = input('Input \"actual\" for nodes to be actual locations, \"random\" for nodes to be randomly generated locations: ')\n customer_type = input('Input \"random\" for random arrival rate, \"rate from data\", or \"actual\" for actual data: ')\n if node_type == \"actual\":\n demand_node_list = generate_demand_nodes_from_data(locations, customer_type)\n elif node_type == \"random\":\n radius = float(input(\"Input a radius for the demand nodes: \"))\n demand_node_list = generate_demand_nodes_in_zip(locations, num_demand_nodes, radius, customer_type)\n else:\n raise InputError\n file_name = write_file(demand_node_list,demand_node_list)\n distance_matrix = distance_between_nodes_api(demand_node_list)\n if customer_type == \"random\" or \"random from data\":\n customer_list = generate_customers(demand_node_list, time_horizon, fixed_service_time)\n elif customer_type == \"actual\":\n start = input(\"Input a start time (hour in military time): \")\n stop = start+(time_horizon/60)\n customer_list = generate_customers_from_data(node_list, fixed_service_time, start, stop)\n day = input(\"Input a number 0-6 corresponding to a day Sun-Sat: \")\n customer_list = customer_list[day]\n if shifts is True:\n shift_lower_bound = input('Input the minimum shift length: ')\n shift_upper_bound = input('Input the maximum shift length: ')\n else:\n shift_lower_bound = 0\n shift_upper_bound = time_horizon\n if restrictions is True:\n # build probabilities dictionary\n probabilities = {}\n for node in demand_node_list:\n print(\"Latitude: \" % s) % node.lat\n print(\"Longitude: \" % s) % node.lon\n prob = input('Input the probability of a nurse being restricted from this node: ')\n probabilites[node.id_number] = prob\n nurse_list = generate_nurses_with_restrictions(probabilities, node_list,num_nurses, time_horizon,\n shift_lower_bound, shift_upper_bound, node)\n else:\n nurse_list = generate_nurses(num_nurses, num_demand_nodes, demand_node_list, time_horizon, shift_lower_bound, shift_upper_bound,node)\n\n # customers are served in the order they arrive (at the moment)\n current_time = 0\n for customer in customer_list:\n # next event dispatch after a customer arrives and at least one nurse is available\n current_time = max(current_time, customer.arrival_time)\n # choose which nurse to dispatch\n nurse_to_dispatch, dispatch_time, nurses_working = updated_dispatch_nurse(nurse_list, customer,\n current_time, distance_matrix,\n fixed_service_time,\n time_horizon)\n # serve customer and update metrics\n current_time = updated_serve_customer(nurse_to_dispatch, customer, distance_matrix, dispatch_time,\n nurses_working)\n # report on simulation\n time_varying_system_metrics(nurse_list, customer_list, fixed_service_time, time_horizon)\n aggregate_system_metrics(nurse_list, customer_list)\n return nurse_list, demand_node_list",
"def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)",
"def abstract_sim(self, dictionary, globalVar, listOfValuesToTest):\n\t\tstorage = getattr(toggles, globalVar)\n\t\tcounts = []\n\t\tfor i in range(len(listOfValuesToTest)):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running for: \" + str(listOfValuesToTest[i])\n\t\t\tsetattr(toggles, globalVar, listOfValuesToTest[i])\n\t\t\tcounts.append([])\n\t\t\tfor run in range(toggles.NUM_SIM):\n\t\t\t\tself.run_sim(dictionary)\n\t\t\t\tcounts[i].append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint run\n\t\tavgL, stdL = [], []\n\t\tfor ls in counts:\n\t\t\tavgL.append(np.mean(ls))\n\t\t\tstdL.append(np.std(ls))\n\t\tif toggles.GEN_GRAPHS:\n\t\t\tgraphGen.abstract_sim(globalVar, listOfValuesToTest, avgL, stdL, counts, toggles.OUTPUT_PATH) # TODO clean this comment\n\t\t# labels = (str(globalVar),'Task Count')\n\t\t# title = str(globalVar) + \" variance impact on Task Count\"\n\t\t# dest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_abstract_sim'\n\t\t# if toggles.GEN_GRAPHS:\n\t\t# \tline_graph_gen(listOfValuesToTest, avgL, dest +'line.png',stderr = stdL,labels=labels, title = title)\n\t\t# \tif toggles.DEBUG_FLAG:\n\t\t# \t\tprint \"Wrote File: \" + dest+'line.png'\n\t\t# \tif len(counts[0])>1:\n\t\t# \t\tmulti_hist_gen(counts, listOfValuesToTest, dest +'hist.png',labels=labels, title = title)\n\t\t# \t\tif toggles.DEBUG_FLAG:\n\t\t# \t\t\tprint \"Wrote File: \" + dest+'hist.png'\n\t\t# \telif toggles.DEBUG_FLAG:\n\t\t# \t\tprint \"only ran one sim, ignoring hist_gen\"\n\n\t\tsetattr(toggles, globalVar, storage)\n\t\treturn",
"def test_tuple_synapses(self):\n # reproducible arbitrariness\n np.random.seed(5003)\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n self.rule.alpha = 1.0\n self.rule.beta = 1.5\n\n tmax = 10*self.dt\n\n W0 = np.copy(self.syns.W)\n\n sim1 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim1.run(tmax)\n\n final1 = np.copy(self.syns.W)\n\n self.syns.W = np.copy(W0)\n\n rule2 = SuperExponentialPlasticity(\n (self.syns.source, self.syns.target, self.syns.W),\n self.tutor, constrain_positive=False, rate=1-6)\n rule2.alpha = 1.0\n rule2.beta = 1.5\n\n sim2 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, rule2, dt=self.dt)\n sim2.run(tmax)\n\n final2 = np.copy(self.syns.W)\n\n self.assertTrue(np.allclose(final1, final2))",
"def simulateDataOnHimster(thisExperiment: Experiment, thisScenario: Scenario) -> Scenario:\n\n for task in thisScenario.SimulationTasks:\n\n print(f\"running simulation of type {str(task.simDataType)} and path ({task.dirPath} at states:\")\n print(f\"current state: {str(task.simState)}\")\n print(f\"last state: {str(task.lastState)}\")\n\n data_keywords = []\n data_pattern = \"\"\n\n cut_keyword = generateCutKeyword(thisExperiment.recoParams)\n\n print(f\"cut keyword is {cut_keyword}\")\n\n merge_keywords = [\"merge_data\", \"binning_300\"]\n # if \"v\" in task.simType:\n if task.simDataType == SimulationDataType.VERTEX:\n data_keywords = [\"uncut\", \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_vertex_data_\"\n # elif \"a\" in task.simType:\n elif task.simDataType == SimulationDataType.ANGULAR:\n data_keywords = [cut_keyword, \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_data_\"\n elif task.simDataType == SimulationDataType.EFFICIENCY_RESOLUTION:\n data_keywords = [cut_keyword, \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_res_data_\"\n else:\n raise NotImplementedError(f\"Simulation type {task.simDataType} is not implemented!\")\n\n # 1. simulate data\n if task.simState == SimulationState.START_SIM:\n os.chdir(lmd_fit_script_path)\n status_code = 1\n # if \"er\" in task.simType:\n if task.simDataType == SimulationDataType.EFFICIENCY_RESOLUTION:\n \"\"\"\n efficiency / resolution calculation.\n\n Takes an offset of the IP into account.\n\n TODO: This needs to know the misalignment of the detector.\n \"\"\"\n found_dirs = []\n # what the shit, this should never be empty in the first place\n if (task.dirPath != \"\") and (task.dirPath is not None):\n temp_dir_searcher = general.DirectorySearcher(\n [\n thisExperiment.recoParams.simGenTypeForResAcc.value,\n data_keywords[0],\n ] # look for the folder name including sim_type_for_resAcc\n )\n temp_dir_searcher.searchListOfDirectories(task.dirPath, thisScenario.track_file_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n print(f\"found dirs now: {found_dirs}\")\n else:\n # path may be empty, then the directory searcher tries to find it\n pass\n\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n thisScenario.track_file_pattern + \"*.root\",\n )\n elif task.lastState < SimulationState.START_SIM:\n # then lets simulate!\n # this command runs the full sim software with box gen data\n # to generate the acceptance and resolution information\n # for this sample\n # note: beam tilt and divergence are not necessary here,\n # because that is handled completely by the model\n\n # because we don't want to change the experiment config or\n # anything in the simParams, recoParam, alignParams,\n # we'll create temp objects here.\n\n tempSimParams = thisExperiment.simParams\n tempRecoParams = thisExperiment.recoParams\n tempAlignParams = thisExperiment.alignParams\n\n thisIPX = tempRecoParams.recoIPX\n thisIPY = tempRecoParams.recoIPY\n thisIPZ = tempRecoParams.recoIPZ\n\n max_xy_shift = math.sqrt(thisIPX**2 + thisIPY**2)\n max_xy_shift = float(\"{0:.2f}\".format(round(float(max_xy_shift), 2)))\n\n # since this is the res/acc case, these parameters must be changed\n tempSimParams.simGeneratorType = tempRecoParams.simGenTypeForResAcc\n tempSimParams.num_events_per_sample = tempRecoParams.num_events_per_resAcc_sample\n tempSimParams.num_samples = tempRecoParams.num_resAcc_samples\n tempSimParams.theta_min_in_mrad -= max_xy_shift\n tempSimParams.theta_max_in_mrad += max_xy_shift\n 
tempSimParams.ip_offset_x = thisIPX\n tempSimParams.ip_offset_y = thisIPY\n tempSimParams.ip_offset_z = thisIPZ\n\n # since this is the res/acc case, these parameters must be updated\n tempRecoParams.num_samples = tempRecoParams.num_resAcc_samples\n tempRecoParams.num_events_per_sample = tempRecoParams.num_events_per_resAcc_sample\n\n # TODO: alignment part\n # if alignement matrices were specified, we used them as a mis-alignment\n # and alignment for the box simulations\n\n (job, returnPath) = create_simulation_and_reconstruction_job(\n tempSimParams,\n tempAlignParams,\n tempRecoParams,\n application_command=thisScenario.Sim,\n use_devel_queue=args.use_devel_queue,\n )\n job_manager.append(job)\n\n task.dirPath = returnPath\n thisScenario.acc_and_res_dir_path = returnPath\n # last_state += 1\n # last state was < 1, so 0. That means an increase is now 1\n task.lastState = SimulationState.START_SIM\n\n # elif \"a\" in task.simType:\n elif task.simDataType == SimulationDataType.ANGULAR:\n \"\"\"\n a is the angular case. this is the data set onto which the luminosiy fit is performed.\n it is therefore REAL digi data (or DPM data of course) that must be reconstructed again\n with the updated reco parameter (like the IP position, cuts applied and alignment).\n note: beam tilt and divergence are not used here because\n only the last reco steps are rerun of the track reco\n \"\"\"\n found_dirs = []\n status_code = 1\n # what the shit, this should never be empty in the first place\n if (task.dirPath != \"\") and (task.dirPath is not None):\n temp_dir_searcher = general.DirectorySearcher([\"dpm_elastic\", data_keywords[0]])\n temp_dir_searcher.searchListOfDirectories(task.dirPath, thisScenario.track_file_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n\n else:\n # path may be empty, then the directory searcher tries to find it\n pass\n\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n thisScenario.track_file_pattern + \"*.root\",\n )\n\n # oh boi that's bound to be trouble with IntEnums\n elif task.lastState < task.simState:\n\n # * reco params must be adjusted if the res/acc sample had more jobs or samples that the real (or dpm) data\n rec_par = thisExperiment.recoParams\n if thisExperiment.recoParams.num_samples > 0 and rec_par.num_samples > thisExperiment.recoParams.num_samples:\n rec_par.num_samples = thisExperiment.recoParams.num_samples\n\n # TODO: have alignment parameters changed? take them from the experiment\n align_par = thisExperiment.alignParams\n\n (job, returnPath) = create_reconstruction_job(\n rec_par,\n align_par,\n str(thisExperiment.baseDataOutputDir),\n application_command=thisScenario.Reco,\n use_devel_queue=args.use_devel_queue,\n )\n job_manager.append(job)\n\n task.dirPath = returnPath\n thisScenario.filteredTrackDirectory = returnPath\n\n # Simulation is done, so update the last_state\n task.lastState = SimulationState.START_SIM\n\n # elif \"v\" in task.simType:\n elif task.simDataType == SimulationDataType.VERTEX:\n\n # TODO: check if the sim data is already there, if yes return 0, else start sim\n status_code = 0\n\n # # vertex Data must always be created without any cuts first\n # tempRecoPars = thisExperiment.recoParams\n # tempRecoPars.use_xy_cut = False\n # tempRecoPars.use_m_cut = False\n\n # # TODO: misalignment is important here. the vertex data can have misalignment (because it's real data)\n # # but it has no alignment yet. 
that is only for the second reconstruction\n # tempAlignPars = thisExperiment.alignParams\n # tempAlignPars.alignment_matrices_path = None\n\n # job, _ = create_simulation_and_reconstruction_job(\n # thisExperiment.simParams,\n # tempAlignPars,\n # tempRecoPars,\n # use_devel_queue=args.use_devel_queue,\n # application_command=thisScenario.Sim,\n # )\n # job_manager.append(job)\n\n else:\n raise ValueError(f\"This tasks simType is {task.simDataType}, which is invalid!\")\n\n if status_code == 0:\n print(\"found simulation files, skipping\")\n task.simState = SimulationState.MAKE_BUNCHES\n task.lastState = SimulationState.START_SIM\n elif status_code > 0:\n print(f\"still waiting for himster simulation jobs for {task.simDataType} data to complete...\")\n else:\n raise ValueError(\"status_code is negative, which means number of running jobs can't be determined. \")\n\n # 2. create data (that means bunch data, create data objects)\n if task.simState == SimulationState.MAKE_BUNCHES:\n # check if data objects already exists and skip!\n temp_dir_searcher = general.DirectorySearcher(data_keywords)\n temp_dir_searcher.searchListOfDirectories(task.dirPath, data_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n status_code = 1\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n data_pattern + \"*\",\n is_bunches=True,\n )\n\n elif task.lastState < task.simState:\n os.chdir(lmd_fit_script_path)\n # bunch data\n # TODO: pass experiment config, or better yet, make class instead of script\n bashcommand = (\n \"python makeMultipleFileListBunches.py \"\n + f\" --filenamePrefix {thisScenario.track_file_pattern}\"\n + \" --files_per_bunch 10 --maximum_number_of_files \"\n + str(thisExperiment.recoParams.num_samples)\n + \" \"\n + task.dirPath\n )\n print(f\"Bash command for bunch creation:\\n{bashcommand}\\n\")\n _ = subprocess.call(bashcommand.split())\n # TODO: pass experiment config, or better yet, make class instead of script\n # create data\n bashArgs = []\n # if \"a\" in task.simType:\n if task.simDataType == SimulationDataType.ANGULAR:\n el_cs = thisScenario.elastic_pbarp_integrated_cross_secion_in_mb\n bashArgs.append(\"python\")\n bashArgs.append(\"createMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--jobCommand\")\n bashArgs.append(thisScenario.LmdData)\n bashArgs.append(f\"{thisScenario.momentum:.2f}\")\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n bashArgs.append(\"../dataconfig_xy.json\")\n\n if el_cs:\n bashArgs.append(\"--elastic_cross_section\")\n bashArgs.append(str(el_cs))\n # bashcommand += \" --elastic_cross_section \" + str(el_cs)\n else:\n bashArgs.append(\"python\")\n bashArgs.append(\"createMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--jobCommand\")\n bashArgs.append(thisScenario.LmdData)\n bashArgs.append(f\"{thisScenario.momentum:.2f}\")\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n bashArgs.append(\"../dataconfig_xy.json\")\n\n print(bashArgs)\n _ = subprocess.call(bashArgs)\n\n # last_state = last_state + 1\n # was apparently bunches\n task.lastState = SimulationState.MERGE\n\n bashArgs.clear()\n\n # else:\n # raise RuntimeError(\"No data could be found, but no commands are to be executed. 
This can't be!\")\n\n if status_code == 0:\n print(\"skipping bunching and data object creation...\")\n # state = 3\n task.simState = SimulationState.MERGE\n task.lastState = SimulationState.MAKE_BUNCHES\n elif status_code > 0:\n print(f\"status_code {status_code}: still waiting for himster simulation jobs for {task.simDataType} data to complete...\")\n else:\n # ok something went wrong there, exit this scenario and\n # push on bad scenario stack\n task.simState = SimulationState.FAILED\n raise ValueError(\"Something went wrong with the cluster jobs! This scenario will no longer be processed.\")\n\n # 3. merge data\n if task.simState == SimulationState.MERGE:\n # check first if merged data already exists and skip it!\n temp_dir_searcher = general.DirectorySearcher(merge_keywords)\n temp_dir_searcher.searchListOfDirectories(task.dirPath, data_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n if not found_dirs:\n os.chdir(lmd_fit_script_path)\n # merge data\n # if \"a\" in task.simType:\n bashArgs = []\n if task.simDataType == SimulationDataType.ANGULAR:\n bashArgs.append(\"python\")\n bashArgs.append(\"mergeMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--num_samples\")\n bashArgs.append(str(bootstrapped_num_samples))\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n\n else:\n bashArgs.append(\"python\")\n bashArgs.append(\"mergeMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n\n print(\"working directory:\")\n print(f\"{os.getcwd()}\")\n print(f\"running command:\\n{bashArgs}\")\n _ = subprocess.call(bashArgs)\n\n task.simState = SimulationState.DONE\n\n if task.lastState == SimulationState.FAILED:\n thisScenario.is_broken = True\n break\n\n # remove done tasks\n thisScenario.SimulationTasks = [simTask for simTask in thisScenario.SimulationTasks if simTask.simState != SimulationState.DONE]\n\n return thisScenario",
"def simulation_run_v1(num_demand_nodes,num_nurses,time_horizon,arrival_rate,fixed_service_time=True,shifts=False,restrictions=False):\n\n # generate demand nodes, customers and nurses\n node = False\n demand_node_list=randomly_generate_uniform_demand_nodes(num_demand_nodes,arrival_rate)\n distance_matrix=distance_between_fake_nodes(demand_node_list)\n customer_list=generate_customers(demand_node_list,time_horizon,fixed_service_time)\n if shifts is True:\n shift_lower_bound = input('Input the minimum shift length: ')\n shift_upper_bound = input('Input the maximum shift length: ')\n else:\n shift_lower_bound = 0\n shift_upper_bound = time_horizon\n if restrictions is True:\n # build probabilities dictionary\n probabilities = {}\n for node in demand_node_list:\n print(\"Latitude: \" %s) %node.lat\n print(\"Longitude: \" %s) %node.lon\n prob = input('Input the probability of a nurse being restricted from this node: ')\n probabilites[node.id_number] = prob\n nurse_list = generate_nurses_with_restrictions(probabilities, node_list,num_nurses, time_horizon,\n shift_lower_bound, shift_upper_bound, node)\n else:\n nurse_list=generate_nurses(num_nurses,num_demand_nodes, demand_node_list, time_horizon, shift_lower_bound, shift_upper_bound)\n\n # customers are served in the order they arrive (at the moment)\n current_time=0\n for customer in customer_list:\n\n #next event dispatch after a customer arrives and at least one nurse is available\n current_time=max(current_time,customer.arrival_time)\n # choose which nurse to dispatch\n nurse_to_dispatch,dispatch_time=dispatch_nurse(nurse_list,customer,current_time,distance_matrix,fixed_service_time)\n # serve customer and update metrics\n serve_customer(nurse_to_dispatch,customer,current_time,distance_matrix,dispatch_time)\n #report on simulation\n time_varying_system_metrics(nurse_list,customer_list,fixed_service_time, time_horizon)\n aggregate_system_metrics(nurse_list,customer_list)\n return nurse_list, demand_node_list",
"def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration of offsprings\n postOps = [\n\n \n sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. 
If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"",
"def do_work(self, data):\n rank = MPI.COMM_WORLD.Get_rank()\n name = MPI.Get_processor_name()\n\n print(' Slave %s rank %d executing task %s' % (name, rank, data['task']))\n\n if data['task'] == 'initial_sim':\n # define explicit assimulo problem\n sim_obj = data['sim_obj']\n rhs_fun = sim_obj.rhs_fun # data['rhs_fun']\n y_initial = data['y0']\n estimate_id = data['id']\n ode_opts = sim_obj.ode_opts # data['ode_opts']\n ode_sys_opts = data['ode_sys_opts']\n t_final = sim_obj.t_final # data['t_final']\n all_options = [ode_opts, ode_sys_opts]\n\n print(' Slave %s rank %d executing initial_sim for estimate: %s sample: %s, data set: %s' %\n (name, rank, estimate_id[0], estimate_id[1], estimate_id[2]))\n slave_tout, slave_yout, _, _ = simulate_ode(rhs_fun, y_initial, tf=t_final, opts=all_options)\n print(' ode simulation complete ')\n\n # calculate flux\n flux_fun = sim_obj.flux_fun # data['flux_fun']\n slave_flux = np.array(list(map(lambda x: flux_fun(x, ode_sys_opts), slave_yout)))\n\n result = (slave_tout, slave_yout, slave_flux, estimate_id[0], estimate_id[1], estimate_id[2], sim_obj,\n ode_sys_opts)\n\n elif data['task'] == 'perturbation_sim':\n\n sim_obj = data['sim_obj']\n rhs_fun = sim_obj.rhs_fun # data['rhs_fun']\n y_initial = data['y0']\n estimate_id = data['id']\n perturbation_id = data['perturbation_id']\n ode_opts = sim_obj.ode_opts # data['ode_opts']\n ode_sys_opts = data['ode_sys_opts']\n t_final = sim_obj.t_final # data['t_final']\n all_options = [ode_opts, ode_sys_opts]\n\n print(' Slave %s rank %d executing initial_sim for estimate: %s sample: %s, data set: %s '\n 'perturbation: %s' %\n (name, rank, estimate_id[0], estimate_id[1], estimate_id[2], perturbation_id))\n slave_tout, slave_yout, _, _ = simulate_ode(rhs_fun, y_initial, tf=t_final, opts=all_options)\n print(' ode perturbation simulation complete ')\n\n # calculate flux\n flux_fun = sim_obj.flux_fun # data['flux_fun']\n slave_flux = np.array(list(map(lambda x: flux_fun(x, ode_sys_opts), slave_yout)))\n\n result = (slave_tout, slave_yout, slave_flux, estimate_id[0], estimate_id[1], estimate_id[2],\n perturbation_id)\n\n return data['task'], result",
"def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()",
"def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()",
"def step(self, actions):\n assert (len(actions) == len(self.simulators))\n\n data_out = {outp: self.num_simulators*[None] for outp in self.outputs}\n\n # def act(idx, s):\n # try:\n # response = s.step(actions[idx])\n # if self.simulator_type == 'room_simulator':\n # response = self._convert_observation(s, response, self.outputs) ## ATTENTION DANS DOOM ON MODIFIE DIRECTEMENT LES DATA DANS DOOM SIMULATOR\n # for outp in self.outputs:\n # data_out[outp][idx] = response[outp] # ICI LES DATA ONT LA BONNE SHAPE\n # except Exception as exc:\n # print('Exception when stepping simulator with id: ' + str(idx))\n # raise exc\n\n # with ThreadPoolExecutor(max_workers=self.num_simulators) as executor:\n # futures = []\n # for i in range(self.num_simulators):\n # future = executor.submit(act, i, self.simulators[i])\n # futures.append(future)\n # concurrent.futures.wait(futures)\n # # check if any exception\n # for f in futures:\n # f.result()\n\n data_out = {outp: [] for outp in self.outputs}\n \n for (sim, act) in zip(self.simulators, actions):\n data_one_sim = sim.step(act)\n for outp in self.outputs:\n data_out[outp].append(data_one_sim[outp])\n\n # print(data_out.keys())\n return data_out",
"def randomize_initial_conc_simulations(\n self, mod, n=10, lower_bound=0.1, upper_bound=10,\n plot=False, end_time=100, num_simulation_points=101,\n hspace=0.5, wspace=0.3, ncols=5, filename=None, **kwargs):\n ics = [i.replace('[', '').replace(']', '') for i in mod.getFloatingSpeciesConcentrationIds()]\n\n original_ics = dict(zip(ics, mod.getFloatingSpeciesConcentrations()))\n sample = lhs(n=len(original_ics), samples=n, iterations=1, criterion=None)\n sample = uniform(lower_bound, upper_bound).ppf(sample)\n\n print('Simulating time series data')\n simulations = {}\n for i in range(sample.shape[0]):\n print('Percent Complete: {}%'.format(round(i / sample.shape[0] * 100, 2)))\n mod.reset()\n for j in range(sample.shape[1]):\n setattr(mod, ics[j], sample[i, j])\n data = mod.simulate(0, end_time, num_simulation_points)\n df = pd.DataFrame(data)\n df.columns = [i.replace('[', '').replace(']', '') for i in data.colnames]\n simulations[i] = df.set_index('time')\n\n df = pd.concat(simulations)\n dct = {}\n for label, df2 in df.groupby(level=0):\n dct[label] = df2.subtract(df2.iloc[0])\n\n df = pd.concat(dct)\n df.index = df.index.droplevel(0)\n\n if plot:\n print('plotting time series data')\n nplots = df.shape[1]\n if nplots == 1:\n ncols = 1\n nrows = int(nplots / ncols)\n remainder = nplots % ncols\n if remainder > 0:\n nrows += 1\n\n fig = plt.figure(figsize=(20, 10))\n for i, species in enumerate(df.columns):\n plot_data = df[[species]].reset_index()\n plot_data.columns = ['iterations', 'time', species]\n # print(plot_data)\n ax = plt.subplot(nrows, ncols, i + 1)\n seaborn.lineplot(\n x='time', y=species, hue='iterations',\n data=plot_data, ax=ax, **kwargs, legend=False,\n palette='Blues'\n )\n\n seaborn.despine(ax=ax, top=True, right=True)\n plt.title(species)\n plt.xlabel('')\n plt.ylabel('')\n plt.subplots_adjust(hspace=hspace, wspace=wspace)\n\n if filename is None:\n plt.show()\n else:\n fig.savefig(filename, dpi=300, bbox_inches='tight')\n return df",
"def run(self, obs_data, eps_init, eps_last, eps_decay, n_particles, ess_min=0.5, logger=sys.stdout, info=False, rng=np.random):\n\n all_ps = []\n all_log_weights = []\n all_eps = []\n all_log_ess = []\n all_n_sims = []\n\n logger = open(os.devnull, 'w') if logger is None else logger\n\n # save some log values for reuse\n log_ess_min = np.log(ess_min)\n log_n_particles = np.log(n_particles)\n\n # sample initial population\n iter = 0\n eps = eps_init\n ps, n_sims = self.sample_initial_population(obs_data, n_particles, eps, logger, rng)\n log_weights = np.full(n_particles, -log_n_particles)\n\n if info:\n all_ps.append(ps)\n all_log_weights.append(log_weights)\n all_eps.append(eps)\n all_log_ess.append(0.0)\n all_n_sims.append(n_sims)\n\n logger.write('iter = {0}, eps = {1}, ess (%) = {2}, sims = {3}\\n'.format(iter, eps, 1.0, n_sims))\n\n while eps > eps_last:\n\n # sample next population\n iter += 1\n eps *= eps_decay\n ps, log_weights, n_new_sims = self.sample_next_population(ps, log_weights, obs_data, eps, logger, rng)\n n_sims += n_new_sims\n\n # calculate effective sample size\n log_ess = -scipy.misc.logsumexp(2.0 * log_weights) - log_n_particles\n\n # if population is degenerate, resample particles\n if log_ess < log_ess_min:\n ps = self.resample_population(ps, log_weights, rng)\n log_weights = np.full(n_particles, -log_n_particles)\n\n if info:\n all_ps.append(ps)\n all_log_weights.append(log_weights)\n all_eps.append(eps)\n all_log_ess.append(log_ess)\n all_n_sims.append(n_sims)\n\n logger.write('iter = {0}, eps = {1}, ess (%) = {2}, sims = {3}\\n'.format(iter, eps, np.exp(log_ess), n_sims))\n\n if info:\n return all_ps, all_log_weights, all_eps, all_log_ess, all_n_sims\n else:\n return ps, log_weights",
"def _simulation_run(model_instance, observations, actions, rewards):\r\n\r\n for observation, action, reward in zip(observations, actions, rewards):\r\n model_instance.observe(observation)\r\n model_instance.overrideActionChoice(action)\r\n model_instance.feedback(reward)\r\n\r\n return model_instance",
"def run(self,kRange=None,sigmaRange=None,chunks=None):\n\n ## run spectral clustering parameter search\n totalCores = cpu_count()\n totalCores = totalCores - 1\n\n ## specify the ranges\n if not kRange:\n kRange = np.array([int(round(i)) for i in np.linspace(20,500,15)])\n elif type(kRange) == type([]):\n kRange = np.array(kRange)\n\n ## different sigma ranges are appropriate for different GO aspects\n if sigmaRange:\n pass\n elif self.aspect == 'biological_process':\n sigmaRange = np.linspace(0.01,1.0,15)\n elif self.aspect == 'molecular_function':\n sigmaRange = np.linspace(1.0,2.0,15)\n elif self.aspect == 'cellular_component':\n sigmaRange = np.linspace(0.05,1.0,15)\n else:\n raise Exception(\"invalid aspect provided\")\n\n ## prepare outfiles\n outFid1 = open(self.resultsPath1,'wa')\n self.writer1 = csv.writer(outFid1)\n header1 = ['k','sigma','silvalue']\n self.writer1.writerow(header1)\n \n outFid2 = open(self.resultsPath2,'wa')\n self.writer2 = csv.writer(outFid2)\n header2 = ['k','sigma']+range(kRange.max())\n self.writer2.writerow(header2)\n\n ## limit each iteration to keep memory usage down \n if chunks:\n pass\n else:\n chunks = int(round((np.log(self.M.shape[0]))))\n print(\"chunks = %s\"%chunks)\n\n toRun = []\n for k in kRange:\n toRun += [(k,sigma,self.distancePath,self.dtype) for sigma in sigmaRange]\n\n stopPoints = np.arange(0,len(toRun),chunks)\n if stopPoints[-1] < len(toRun):\n stopPoints = np.hstack([stopPoints[1:],np.array([len(toRun)])])\n\n begin = 0\n\n if chunks == 1:\n self._run_sc(toRun)\n else:\n for i,chunk in enumerate(range(stopPoints.size)):\n stop = stopPoints[chunk]\n print('...running %s-%s/%s'%(begin,stop,len(toRun)))\n self.run_sc(toRun,begin,stop)\n begin = stop\n\n print(\"complete.\")\n outFid1.close()\n outFid2.close()",
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)",
"def split_simsplit_3epochs_iter1(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs",
"def run_metropolis(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n self.sam.sample_values(positions)\n\n self.sam.average_values(self.mc_cycles)\n energy = self.sam.local_energy\n d_El = self.sam.derivative_energy\n var = self.sam.variance\n self.print_averages()\n return d_El, energy, var",
"def test_tuple_synapses(self):\n # reproducible arbitrariness\n np.random.seed(5003)\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n self.rule.alpha = 1.0\n self.rule.beta = 1.5\n\n tmax = 10*self.dt\n\n W0 = np.copy(self.syns.W)\n\n sim1 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim1.run(tmax)\n\n final1 = np.copy(self.syns.W)\n\n self.syns.W = np.copy(W0)\n\n rule2 = TwoExponentialsPlasticity(\n (self.syns.source, self.syns.target, self.syns.W),\n self.tutor, constrain_positive=False, rate=1-6)\n rule2.alpha = 1.0\n rule2.beta = 1.5\n\n sim2 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, rule2, dt=self.dt)\n sim2.run(tmax)\n\n final2 = np.copy(self.syns.W)\n\n self.assertTrue(np.allclose(final1, final2))",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n #initialization of variables\n list_of_results = []\n \n #trial loop\n for i in range(num_trials):\n list_of_results.append(singleSimulation(num_robots, speed, width, height, min_coverage, robot_type, visualize))\n return list_of_results"
] |
[
"0.6438255",
"0.6201443",
"0.61361796",
"0.6124959",
"0.6070049",
"0.6008676",
"0.59633213",
"0.59452456",
"0.59047467",
"0.5833178",
"0.58227533",
"0.57990956",
"0.57914954",
"0.5789886",
"0.57396877",
"0.57264465",
"0.5724978",
"0.5724084",
"0.57111406",
"0.57068664",
"0.5705639",
"0.57021755",
"0.57017756",
"0.5701112",
"0.56945115",
"0.5693045",
"0.56902826",
"0.5687992",
"0.5683522",
"0.5680704"
] |
0.7253336
|
0
|
Plot where a given synapse and trace fall on the Hill function (only one trace/synapse at a time).
|
def plot_hill_func(self, sim_run=None, trace=0, synapse=0, average=False):
    if sim_run is None:
        sim_run = self.default_runs[0]
    # [Ca] responses for the selected trace/synapse across sequential APs
    cav_hits = sim_run.data["Ca_t"][:, trace, synapse]
    # release-probability curve over a 0-2 (arb. units) calcium range
    p_v_func = hill(np.arange(200)/100., S=1, ec50=sim_run.params["ca_ec50"], n=sim_run.params["ca_coop"])
    plt.plot(np.arange(200)/100., p_v_func)
    for i in range(len(cav_hits)):
        plt.plot((cav_hits[i], cav_hits[i]), (0, 1))  # vertical line at each observed [Ca] response
    plt.ylabel('Probability of Vesicle Release')
    plt.xlabel('Calcium Concentration (arb. units)')
    plt.title('Location of [Ca] response on Hill Function for sequential APs')
    plt.show()
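# --- Illustrative addition (not part of the original dataset entry) ---
# plot_hill_func above calls a `hill` helper that is not shown in this entry. Below is a
# minimal sketch of a standard Hill equation with the assumed signature; the exact form
# and default parameter values used by the source project are assumptions.
import numpy as np

def hill(x, S=1.0, ec50=0.7, n=4.0):
    # Saturating Hill curve: the response rises sigmoidally with x (here, [Ca]),
    # reaching half of S at x == ec50, with cooperativity exponent n.
    x = np.asarray(x, dtype=float)
    return S * x**n / (ec50**n + x**n)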
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin",
"def plot_data(heart_filt, pace_filt):\n\n plt.figure(1)\n plt.plot(heart_filt, pace_filt)\n plt.show()",
"def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()",
"def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)",
"def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n 
pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return",
"def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return",
"def graph(self, ax=None, logax=False, el=None):\n if ax == None:\n fig, ax = plt.subplots()\n\n ax.cla()\n ax.clear()\n # if element is defined, plot only one element, otherwise all\n if el:\n self.data.plot(ax=ax, y=el, kind='line', legend=False)\n else:\n self.data.plot(ax=ax, kind='line', legend=False)\n\n if logax:\n ax.set_yscale('log')\n\n if self.starts and self.ends:\n # create lines for start and end of each ablation\n for i in range(0, len(self.starts)):\n ax.axvline(x=self.time[self.starts[i]],\n color='blue', linewidth=2)\n for i in range(0, len(self.ends)):\n ax.axvline(x=self.time[self.ends[i]],\n color='blue', linewidth=2)\n\n if self.laser_off:\n # higlights bacground\n for off in self.laser_off:\n #print(self.time[off[0]], self.time[off[1]])\n try:\n ax.axvspan(\n self.time[off[0]], self.time[off[1]], alpha=0.2, color='red')\n except:\n warnings.warn('something is wrong')\n\n if self.laser_on:\n # higlihts ablation\n for on in self.laser_on:\n ax.axvspan(self.time[on[0]], self.time[on[1]],\n alpha=0.2, color='green')\n\n plt.show()",
"def H_perform_plot(performance, hurricane):\n fig = plt.figure(figsize = (15, 10))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricane[i]\n plt.plot(np.arange(0, len(temp1), 1), temp1, color = temp2.c, label = temp2.name)\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(temp1), 30))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)",
"def plot():\n pass",
"def plotOfSingleSensor(self,index,plot='all'): #name='LFS01_S1'\n\t\tp1=_plot.plot(yLabel='V',xLabel='time [ms]',\n\t\t\t\t\t subtitle=self.sensorNames[index],title=self.title,\n\t\t\t\t\t shotno=self.shotno)\n\t\tif plot=='all' or plot=='raw':\n\t\t\tp1.addTrace(yData=self.solDataRaw[index],xData=self.time*1000,\n\t\t\t\t\t\tyLegendLabel=self.sensorNames[index]+' Raw')\n\t\tif plot=='all' or plot=='fit': \n\t\t\tp1.addTrace(yData=self.solDataFit[index],xData=self.time*1000,\n\t\t\t\t\t\tyLegendLabel=self.sensorNames[index]+' Fit') \n\t\tif plot=='all' or plot=='smoothed' or plot=='smoothedOnly': \n\t\t\tp1.addTrace(yData=self.solData[index],xData=self.time*1000,\n\t\t\t\t\t\tyLegendLabel=self.sensorNames[index]+' Without Offset') \n\t\treturn p1",
"def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()",
"def test_plot_hid(self):\n # also produce a light curve with the same binning\n command = ('{0} -b 100 --e-interval {1} {2}').format(\n os.path.join(self.datadir, 'monol_testA_nustar_fpma_ev_calib' +\n HEN_FILE_EXTENSION), 3, 10)\n\n hen.lcurve.main(command.split())\n lname = os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_E3-10_lc') + \\\n HEN_FILE_EXTENSION\n os.path.exists(lname)\n cname = os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_E_10-5_over_5-3') + \\\n HEN_FILE_EXTENSION\n hen.plot.main([cname, lname, '--noplot', '--xlog', '--ylog', '--HID',\n '-o', 'dummy.qdp'])",
"def plot_trace(self):\n az.plot_trace(self.ifd_)",
"def interactive_hess(gr,g):\n def plot(size=100):\n fig,ax = plt.subplots()\n fig.set_size_inches(8,6)\n ax.hexbin(gr, g, gridsize=size, bins='log', cmap='inferno', label=\"Relative stellar density\")\n ax.set_title(\"HESS DIAGRAM, gridsize={0:d}\".format(size), fontsize = 15)\n ax.set_xlabel(r\"$g-r$\",fontsize = 25)\n ax.set_ylabel(r\"$g$\",fontsize = 25)\n ax.legend(loc='upper left')\n ax.set_ylim(ax.get_ylim()[::-1])\n plt.show()\n interact(plot, size=(50,300,1),continuous_update=False);",
"def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()",
"def update_figure(picks, curve, active_well): \n w = p.get_well(active_well) ##selects the correct welly.Well object\n picks_df = pd.read_json(picks)\n picks_selected = picks_df[picks_df['UWI'] == active_well.replace(' ', '-')]\n \n # regenerate figure with the new horizontal line\n fig = helper.make_log_plot(w=w, ymin=ymin)# , resample=0.1) # resample needs a float to change basis\n fig.update_layout(uirevision=active_well)\n helper.update_picks_on_plot(fig, picks_selected)\n \n return fig",
"def plot(self):\n\t\tself.plotOfSpect()",
"def make_plot(x,y):",
"def plot_HI(ax, wavenum=False):\n # read in HI-lines\n path = pkg_resources.resource_filename(\"measure_extinction\", \"data/\")\n table = pd.read_table(path + \"HI_lines.list\", sep=r\"\\s+\", comment=\"#\")\n # group lines by series\n series_groups = table.groupby(\"n'\")\n colors = plt.get_cmap(\"tab10\")\n series_names = {\n 1: \"Ly\",\n 2: \"Ba\",\n 3: \"Pa\",\n 4: \"Br\",\n 5: \"Pf\",\n 6: \"Hu\",\n 7: \"7\",\n 8: \"8\",\n 9: \"9\",\n 10: \"10\",\n }\n for name, series in series_groups:\n # plot the lines\n for wave in series.wavelength:\n if wavenum:\n x = 1.0 / wave\n else:\n x = wave\n ax.axvline(x, color=colors(name - 1), lw=0.05, alpha=0.4)\n # add the name of the series\n mwave = series.wavelength.mean()\n if wavenum:\n xm = 1.0 / mwave\n else:\n xm = mwave\n ax.text(\n xm,\n 0.04,\n series_names[name],\n transform=ax.get_xaxis_transform(),\n color=colors(name - 1),\n ).set_clip_on(True)",
"def correctOverflows(self):\n plt.figure(11)\n plt.clf()\n plt.plot(self.raw['METROLOGY_DATA'].data.field('TIME'),\n self.raw['METROLOGY_DATA'].data.field('DELTAL') )\n plt.hlines([np.median(self.raw['METROLOGY_DATA'].data.field('DELTAL')),\n np.median(self.raw['METROLOGY_DATA'].data.field('DELTAL'))+self.metJumpSize,\n np.median(self.raw['METROLOGY_DATA'].data.field('DELTAL'))-self.metJumpSize],\n self.raw['METROLOGY_DATA'].data.field('TIME').min(),\n self.raw['METROLOGY_DATA'].data.field('TIME').max(), color='r')\n return",
"def scat_ts_pindex(self, scat=True):\r\n dfa, mc_seed = self.dfa, self.mc_seed\r\n\r\n def plotit(df, color, ax=None, title=None):\r\n if ax is None:\r\n fig, ax = plt.subplots(figsize=(8,8))\r\n else: fig=ax.figure\r\n lolat = np.abs(df.glat)<5\r\n if scat:\r\n ax.plot(df.ts, df.pindex.clip(0.5,3.5), '.', color='blue')\r\n ax.plot(df.ts[lolat], df.pindex[lolat].clip(0.5,3.5), '.', color='red', label='|b|<5')\r\n ax.axvline(25, ls=':', color='red')\r\n ax.set(xlim=(10,40), ylim=(0.5,3.5));\r\n ax.set(xlabel='TS', ylabel=r'$\\Gamma$')\r\n ax.axhline(2.8, ls=':', color='red')\r\n ax.axhline(1.6, ls=':', color='red')\r\n\r\n else:\r\n ts_cut = (df.ts>16) & (df.ts<32)\r\n hkw = dict(bins=np.linspace(0.5,3.5,16), histtype='step', lw=2)\r\n ax.hist(df.pindex[ts_cut & ~lolat], label='lolat', color='green', **hkw)\r\n ax.hist(df.pindex[ts_cut & lolat], label='hilat', color='orange', **hkw)\r\n ax.legend()\r\n\r\n ax.axvline(2.8, ls=':', color='red')\r\n ax.axvline(1.5, ls=':', color='red')\r\n if title is not None: ax.set_title(title)\r\n\r\n fig,axx = plt.subplots(2,2, figsize=(10,10), sharex=True, sharey=False)\r\n axf = axx.flatten()\r\n plotit(dfa, 'green', ax=axf[0], title=self.uwmodel)\r\n plotit(dfa.query('in_fl8y==False'), 'grey', ax=axf[2], title=self.uwmodel+' not in {}'.format(self.cat_name))\r\n plotit(mc_seed, 'orange', ax=axf[3], title='MC seeds')\r\n plotit(dfa.query('in_fl8y==True'), 'blue', ax=axf[1], title=self.cat_name)\r\n\r\n return fig",
"def plot_hwy(data_frame):\n fighwy, axhwy = plot_var(\n data_frame=data_frame,\n x_var=\"time\",\n y_var=\"hwy\",\n label_var=\"mpr\",\n pivot=\"flow\",\n x_label=\" Time [hh:mm]\",\n y_label=\"Headway space [m]\",\n t_label=\"Flow [veh/h]\",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n x_size=7.5,\n transpose=True,\n )\n return fighwy, axhwy",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def hysteresis(T = 1, dimensions = 2, J = 1, filename = \"hist\", hmax = 2.5):\r\n h = np.linspace(-hmax, hmax, 100)\r\n \r\n #size of lattice\r\n N = 20\r\n \r\n #forward tabulated magnetisations and backward going\r\n Mforward = np.zeros(h.shape)\r\n Mbackward = np.zeros(h.shape)\r\n \r\n #initial lattice\r\n lattice = initialiser(N, dimensions = dimensions)\r\n \r\n #anneal lattice\r\n lattice = anneal(lattice, T, 20)\r\n\r\n #forward scan over different values of strength\r\n for i in range(len(h)):\r\n (m,e,l) = simulation(N, T, 200, lattice, h = h[i], nonabsmag=True,\\\r\n dimensions= dimensions, J = J)\r\n Mforward[i] = np.mean(m)\r\n lattice = l\r\n \r\n #backward scan over different values of strength \r\n for i in range(len(h)):\r\n index = len(h) - 1 - i\r\n (m,e,l) = simulation(N, T, 200, lattice, h = h[index], nonabsmag=True,\\\r\n dimensions = dimensions, J = J)\r\n Mbackward[index] = np.mean(m)\r\n lattice = l\r\n \r\n #plot data\r\n f = makeplot(h, [Mforward, Mbackward], [\"Increasing h\", \"Decreasing h\"],\\\r\n \"External field, h $[J]$\", \"Magnetisation\")\r\n f.show()\r\n f.savefig(filename+\".svg\")",
"def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print('matplotlib is not available.')\n else:\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(8, 4))\n\n ax.plot(x, 'b', lw=1)\n if ind.size:\n label = 'valley' if valley else 'peak'\n label = label + 's' if ind.size > 1 else label\n ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,\n label='%d %s' % (ind.size, label))\n ax.legend(loc='best', framealpha=.5, numpoints=1)\n ax.set_xlim(-.02*x.size, x.size*1.02-1)\n ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()\n yrange = ymax - ymin if ymax > ymin else 1\n ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)\n ax.set_xlabel('Data #', fontsize=14)\n ax.set_ylabel('Amplitude', fontsize=14)\n mode = 'Valley detection' if valley else 'Peak detection'\n #ax.set_title(\"Deuxième détection\")\n ax.set_title(\"%s (mph=%s, mpd=%d, threshold=%s, edge='%s')\"\n % (mode, str(mph), mpd, str(threshold), edge))\n # plt.grid()\n plt.show()",
"def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()",
"def plot(list_hof,param):\n plt.figure() \n df = pandas.DataFrame(list_hof,\n columns=[\"name\",\"begin hour\",\"logon\",\"emails\",'device','web'])\n pandas.plotting.parallel_coordinates(df,\"name\")\n plt.title(param)\n lgd=plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.ylabel('Euclidean distance')\n name=results_path+param+'.png'\n plt.savefig(name, bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.show()",
"def plotPSTH(self, stimpath,\n stimdata,\n spikesdict,\n simtime,\n offset=0,\n binsize=10e-3,\n legendSuffix='',\n rate=False,\n normcells=True\n ):\n if not spikesdict:\n return 0\n stimdata = stimdata[:]\n times = []\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in stimpath:\n times = stimdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in stimpath:\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[False, numpy.diff(stimdata) < 0].nonzero()[0]]\n else:\n stimdata = analyzer.smooth(stimdata)\n mid = numpy.mean(stimdata)\n stimdata = stimdata[stimdata > mid] # Threshold at midpoint\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[True, stimdata[1:] > stimdata[:-1]] & numpy.r_[stimdata[:-1] > stimdata[1:], True]]\n if (times is None) or (len(times) == 0):\n return 0\n start = times + offset\n end = numpy.zeros(times.shape)\n end[:-1] = start[1:]\n end[-1] = simtime + offset # We assume\n accumulated_data = []\n for spikedata in spikesdict.values():\n tpoints = spikedata[:]\n for ii in range(len(times)):\n ix = numpy.nonzero((tpoints >= start[ii]) & (tpoints < end[ii]))[0]\n accumulated_data = numpy.r_[accumulated_data, tpoints[ix] - times[ii]]\n if len(accumulated_data) == 0:\n return 0\n # set the bins by splitting interstimulus interval\n interval = numpy.mean(numpy.diff(times))\n bins = numpy.arange(offset, interval+offset, binsize)\n bins = numpy.r_[bins, bins[-1] + binsize]\n hist = numpy.histogram(accumulated_data, bins=bins)\n xx = (hist[1][:-1] + hist[1][1:])/2.0\n if rate:\n yy = hist[0] / binsize\n else:\n yy = hist[0]\n if normcells:\n yy /= len(spikesdict)\n path = stimpath + '_psth' + legendSuffix\n new_curve = Qwt.QwtPlotCurve(path)\n new_curve.setData(xx, yy)\n pen = Qt.QPen(Qt.Qt.blue, 1, Qt.Qt.DashDotLine)\n new_curve.setStyle(Qwt.QwtPlotCurve.Lines)\n new_curve.setPen(pen)\n pen = Qt.QPen(Qt.Qt.red, 1)\n new_curve.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.XCross,\n Qt.QBrush(),\n pen,\n Qt.QSize(3,3))) \n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n path = stimpath + '_bins' + legendSuffix\n histmarkers = Qwt.QwtPlotCurve(path)\n height = int(max(yy) + 0.5)\n yy = numpy.ones(hist[1].shape) * height\n histmarkers.setData(hist[1], yy)\n pen = Qt.QPen(Qt.Qt.black, 1, Qt.Qt.DotLine)\n histmarkers.setPen(pen)\n histmarkers.setStyle(Qwt.QwtPlotCurve.Sticks)\n histmarkers.attach(self)\n self.curve_path_dict[histmarkers] = path\n self.path_curve_dict[path].append(new_curve)\n self.clearZoomStack()\n self.replot()\n return 1",
"def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):\n\ttry:\n\t\timport matplotlib.pyplot as plt\n\texcept ImportError:\n\t\tprint('matplotlib is not available.')\n\telse:\n\t\tif ax is None:\n\t\t\t_, ax = plt.subplots(1, 1, figsize=(8, 4))\n\t\t\tax.plot(x, 'b', lw=1)\n\t\tif ind.size:\n\t\t\tlabel = 'valley' if valley else 'peak'\n\t\t\tlabel = label + 's' if ind.size > 1 else label\n\t\t\tax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,\n\t\t\tlabel='%d %s' % (ind.size, label))\n\t\t\tax.legend(loc='best', framealpha=.5, numpoints=1)\n\t\tax.set_xlim(-.02*x.size, x.size*1.02-1)\n\t\tymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()\n\t\tyrange = ymax - ymin if ymax > ymin else 1\n\t\tax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)\n\t\tax.set_xlabel('Data #', fontsize=14)\n\t\tax.set_ylabel('Amplitude', fontsize=14)\n\t\tmode = 'Valley detection' if valley else 'Peak detection'\n\t\tax.set_title(\"%s (mph=%s, mpd=%d, threshold=%s, edge='%s')\"\n\t\t% (mode, str(mph), mpd, str(threshold), edge))\n plt.show()"
] |
[
"0.650976",
"0.6214818",
"0.6204351",
"0.619511",
"0.6171883",
"0.61140436",
"0.6093575",
"0.6076139",
"0.60694313",
"0.60479563",
"0.60235536",
"0.60000885",
"0.59608746",
"0.59412897",
"0.5924625",
"0.59066564",
"0.5855065",
"0.5822589",
"0.5816968",
"0.581117",
"0.5810692",
"0.5807872",
"0.58036804",
"0.58005494",
"0.57943374",
"0.5787271",
"0.57858515",
"0.577103",
"0.5759331",
"0.57511187"
] |
0.7128453
|
0
|
>>> download_agents()[0].x Downloads data from the website, checks the first agent's position 20
|
def download_agents():
    # This function is exercised by the doctest shown in the query.
    r = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')
    content = r.text
    soup = bs4.BeautifulSoup(content, 'html.parser')
    td_ys = soup.find_all(attrs={"class": "y"})
    td_xs = soup.find_all(attrs={"class": "x"})
    # print(td_ys)  # left commented out - printing here makes the doctest fail
    # print(td_xs)  # left commented out for the same reason
    website_data = []  # new list to hold the downloaded agents
    for i in range(len(td_ys)):
        # len(td_ys) gives the number of coordinate pairs scraped from the page
        y = int(td_ys[i].text)
        x = int(td_xs[i].text)
        # append to the new website_data list rather than to the existing agents list
        website_data.append(agentframework_brigi.Agent(environment, [], y, x))
    return website_data
    # challenge note: this return was originally inside the for loop, so the function
    # exited after one iteration (which is why only one agent appeared in the animation).
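# --- Illustrative addition (not part of the original dataset entry) ---
# A hedged setup sketch showing the imports and module-level names download_agents assumes.
# `agentframework_brigi` and `environment` belong to the original practical's codebase and
# are placeholders here, not verified APIs.
import requests
import bs4
# import agentframework_brigi          # hypothetical course module providing the Agent class
# environment = []                     # environment object the Agent constructor expects
#
# agents = download_agents()
# print(agents[0].x)                   # per the doctest in the query, expected to print 20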
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def download_agent_if_missing(filename):\n if file_missing(filename):\n print filename+'is missing, downloading it first'\n download(filename)",
"def fetch_urls(browser, number_publications):\n links = []\n links.extend(re.findall(\"/p/([^/]+)/\", browser.page_source))\n n_scrolls = scrolls(number_publications)\n\n for i in range(\n n_scrolls\n ): # collecting all the pictures links in order to see which ones contains location data\n print(\n Fore.WHITE +\n \"Scrolling the Instagram target profile, scraping pictures URLs ...\"\n + str(100 * i // n_scrolls) + \"% of the profile scrolled \",\n end=\"\\r\")\n browser.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight)\")\n links.extend(re.findall(\"/p/([^/]+)/\", browser.page_source))\n time.sleep(\n 1\n ) # dont change this, otherwise some scrolls won't be effective and all the data won't be scrapped\n\n print(Fore.WHITE + \"\\nPictures links collected: \" + Fore.GREEN + \"OK\")\n return list(dict.fromkeys(links)) # remove duplicates",
"def get_cheapest_flight():\n\n user_agent = [\n\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'\n 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',\n 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:24.0) Gecko/20100101 Firefox/24.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0',\n 'Opera/9.80 (Linux armv7l) Presto/2.12.407 Version/12.51 , D50u-D1-UHD/V1.5.16-UHD (Vizio, D50u-D1, Wireless)',\n\n ]\n\n proxy_list = ssl_proxies()\n # print(len(proxy_list))\n random_agent = random.randint(0, len(user_agent)-1)\n random_proxy = random.randint(0, len(proxy_list)-1)\n\n headers = { 'User-agent' : user_agent[random_agent],\n 'Connection' : 'close',\n 'Upgrade-Insecure-Requests': '1',\n 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Sec-Fetch-Site' : 'same-origin',\n 'Sec-Fetch-Mode' : 'navigate',\n 'Sec-Fetch-User' : '?1',\n 'Sec-Fetch-Dest' : 'document',\n 'Referer' : 'https://www.cheapflights.co.za/',\n 'Accept-Encoding': 'gzip, deflate' ,\n 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',\n }\n\n if check_proxy_status:\n proxies = proxy_list[random_proxy]\n print(\"Using http proxy.\")\n else:\n print(\"Proxy Down, using original ip.\")\n proxies = {}\n\n\n now = datetime.datetime.now()\n\n request_session = requests.Session()\n flight_request_uri = f\"https://www.cheapflights.co.za/flight-search/{arguments.cfrom}-{arguments.to}/{now.year}-{arguments.month}-{arguments.day}?sort=price_a\"\n\n # print(colored(flight_request_uri, 'white')) the url.\n # print(len(request.text))\n request = request_session.get(flight_request_uri, headers=headers, proxies=proxies)\n\n length = len(request.text)\n if request.text.find(\"\"\"If you are seeing this page, it means that Cheapflights thinks you are a \"bot,\" and the \n page you were trying to get to is only useful for humans.\"\"\") > -1 :\n print(colored('Ithi Uyi\\'Robot Leshandis', 'red', attrs=['bold', 'blink']))\n \n cheapest = re.search(\"\"\"Cheapest\\n</\\w+>\\n</\\w+>\\n</\\w+>\\n</\\w+>\\n<\\w+\\s\\w+=\"\\w+\\s\\w+\">\\n<\\w+\\s\\w+='\\w+-\\w+\\s\\w+-\\\n w+\\s\\w+\\s\\w+\\s\\w+\\s\\w+\\s\\w+'\\n>\\nR\\d\\s\\d{3}\\n|R\\d{3}\\n\"\"\", request.text)\n\n try:\n get_flights(request.text)\n return(cheapest.group(0).rstrip())\n except AttributeError:\n return (colored(\"Something went wrong, Try again.\", 'red'))",
"async def org_info_below_13(org_urls13):\n org_info_till13 = []\n project_urls_till13 = []\n for url in org_urls13:\n # General information about the org\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[0].text.splitlines()[-1].strip()\n mailing_list = org_info[1].text.split(\":\")[-1].strip()\n detail = org_info[2].text\n org_info_till13.append({'name': org_name, 'about': detail,\n 'page': web_page, 'mail': mailing_list,\n 'link': url})\n project_urls_till13.extend(grab_project_links(soup))\n\n except IndexError:\n print(url)\n\n return org_info_till13, get_project_info(project_urls_till13)",
"def get_headlines_from_one_page(driver,site,URL_exclusions):\r\n headlines = []\r\n links = get_links_from_one_page(driver,site,URL_exclusions)\r\n for i in range(len(links)):\r\n start = time.time()\r\n timeout = 0\r\n while timeout < 120: #Someimtes the page doesn't load. Quit the page after two minutes.\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Pages contained in class=\"g\" elements\r\n button = results[i].find_element_by_tag_name(\"a\") #Links under <a> tag\r\n link = button.get_attribute('href') #URL contained under 'href' \r\n if link.find(site) != -1: #Some \"g\" elements are not search results\r\n find = np.zeros(len(URL_exclusions))\r\n for j in range(len(URL_exclusions)):\r\n find[j] = bool(link.find(URL_exclusions[j]) == -1)\r\n if all(find) == True: #If no exclusion words found in UR\r\n button.click()\r\n sleep_time = np.random.random() * np.random.randint(1,6) #Sleep for random time between 1 and 5s to reduce chance of bot detection.\r\n time.sleep(sleep_time)\r\n headline = get_headline(driver)\r\n if headline != '': #Only interested if we succesfully find headline\r\n headlines.append(headline)\r\n driver.back()\r\n sleep_time = np.random.random() * np.random.randint(1,6)\r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n if timeout >= 120:\r\n break #If results hasn't loaded after 120 seconds, we need to break the for loop\r\n return headlines",
"def detect_spider(self, data):\n for ip in data.keys():\n count = data[ip][\"count\"]\n last_time = data[ip][\"ep_time\"][0]\n initial_time = data[ip][\"ep_time\"][int(len(data[ip][\"ep_time\"]) - 1)]\n delta = abs(int(last_time - initial_time))\n\n try:\n calc_count_thresh = count / delta\n calc_get_thresh = len(data[ip][\"unique_get\"]) / delta\n except ZeroDivisionError:\n calc_count_thresh = count\n calc_get_thresh = len(data[ip][\"unique_get\"])\n\n if (calc_count_thresh > self._THRESHOLD or\n calc_get_thresh > self._THRESHOLD or\n self.payload_match(data[ip][\"ua\"])):\n if ip not in self.logged_IP:\n self.logged_IP.append(ip)\n self.logger.log(\n \"Possible web crawler / spider / bad user agent detected from: \" + str(ip),\n logtype=\"warning\"\n )\n utils.write_ip(str(ip))\n # Generate CSV report using OSINT tools\n self.osint_obj.perform_osint_scan(ip.strip(\" \"))\n # Write malicious IP to file, to teach Firewall about the IP\n write_mal_ip(ip.strip(\" \"))",
"def identify_num_simulate():\r\n # http / https ip:port\r\n proxy = {'http': 'http://112.85.165.113:9999'}\r\n response = requests.get(base_url + '/get', proxies=proxy)\r\n print(response.status_code)\r\n print(response.text)",
"def can_fetch(self, agent, url):\n if not url.startswith(self.base_url):\n url = urljoin(self.base_url, url)\n if self.robot_parser:\n return self.robot_parser.can_fetch(agent, url)\n return True",
"def link_is_present(driver, delay, selector, index, results_page):\n try:\n WebDriverWait(driver, delay).until(\n EC.presence_of_element_located(\n (By.XPATH, selector)\n )\n )\n print(\"**************************************************\")\n print(\"\\nScraping data for result {}\" \\\n \" on results page {} \\n\".format(index, results_page))\n except Exception as e:\n print(e)\n if index < 25:\n print(\"\\nWas not able to wait for job_selector to load. Search \" \\\n \"results may have been exhausted.\")\n return True\n else:\n return False\n else:\n return True",
"def _check_grib(self, url):\n head = requests.head(url)\n check_exists = head.ok\n if check_exists:\n check_content = int(head.raw.info()['Content-Length']) > 1_000_000\n return check_exists and check_content\n else:\n return False",
"async def org_info_above_14(orgs_urls14):\n org_info_14 = []\n project_urls_from14 = []\n for url in orgs_urls14:\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[1].text.splitlines()[-1].strip()\n mailing_list = org_info[2].text.split(\":\")[-1].strip()\n description = soup.find('div', {'class': 'main mdl-cell mdl-cell--8-col\\\n mdl-card mdl-shadow--4dp'})\n detail = description.find_all('p')[2].nextSibling\n org_info_14.append({'name': org_name, 'page': web_page,\n 'about': detail, 'mail': mailing_list,\n 'link': url})\n project_urls_from14.extend(grab_project_links(soup))\n except IndexError:\n print(url)\n\n return org_info_14, get_project_info(project_urls_from14)",
"def get_proxies():\n # url = 'http://nntime.com//'\n url = 'https://free-proxy-list.net/'\n\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr'):\n if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n return proxies",
"def dod():\n file = requests.get(\"https://www.bewakoof.com/design-of-the-day\")\n soup = bs4.BeautifulSoup(file.text, \"lxml\")\n # print(soup)\n\n linkList = soup.select(\"a[class='col-sm-4 col-xs-6'] > div > div > div > img:nth-of-type(2)]\")\n # soup.select(\"div[id=foo] > div > div > div[class=fee] > span > span > a\")\n for i in linkList:\n if \"t-shirt-men\" in str(i):\n # print(i.get('src'))\n webbrowser.open(i.get('src'))",
"def getgoalies(league, year):\n\n url = 'https://www.eliteprospects.com/league/' + league + '/stats/' + year + '?page-goalie='\n # print('Collects data from ' + 'https://www.eliteprospects.com/league/' + league + '/stats/' + year)\n \n print(\"Beginning scrape of \" + league + \" goalie data from \" + year + \".\")\n \n # Return list with all plyers for season in link \n players = []\n \n page = (requests.get(url + str(1) + \"#goalies\", timeout = 500))\n first_page_string = str(page)\n \n while first_page_string == '<Response [403]>':\n print(\"Just got a 403 Error before entering the page. This means EliteProspects has temporarily blocked your IP address.\")\n print(\"We're going to sleep for 60 seconds, then try again.\")\n time.sleep(100)\n page = (requests.get(url + str(1) + \"#goalies\", timeout = 500))\n first_page_string = str(page)\n print(\"Okay, let's try this again\")\n \n if (first_page_string) == '<Response [404]>':\n print(\"ERROR: \" + first_page_string + \" on league: \" + league + \" in year: \" + year + \". Data doesn't exist for this league and season.\")\n \n else:\n \n for i in range(1,99):\n page = requests.get(url+str(i), timeout = 500)\n page_string = str(page)\n \n while page_string == '<Response [403]>':\n print(\"Just got a 403 Error within the page. Time to Sleep, then re-obtain the link.\")\n time.sleep(100)\n page = (requests.get(url+str(i), timeout = 500))\n page_string = str(page)\n print(\"Changed the string within the page. Let's try again\")\n \n soup = BeautifulSoup(page.content, \"html.parser\")\n\n # Get data for players table\n player_table = soup.find(\"table\", {\"class\":\"table table-striped table-sortable goalie-stats highlight-stats season\"})\n\n try:\n df_players = tableDataText(player_table)\n except AttributeError:\n print(\"BREAK: TABLE NONE ERROR: \" + str(requests.get(url+str(i), timeout = 500)) + \" On League: \" + league + \" In Year: \" + year)\n break\n \n if len(df_players)>0:\n\n if df_players['#'].count()>0:\n # Remove empty rows\n df_players = df_players[df_players['#']!=''].reset_index(drop=True)\n\n # Extract href links in table\n href_row = []\n for link in player_table.find_all('a'):\n href_row.append(link.attrs['href'])\n\n # Create data frame, rename and only keep links to players\n df_links = pd.DataFrame(href_row) \n df_links.rename(columns={ df_links.columns[0]:\"link\"}, inplace=True)\n df_links= df_links[df_links['link'].str.contains(\"/player/\")].reset_index(drop=True) \n\n # Add links to players\n df_players['link']=df_links['link'] \n\n players.append(df_players)\n\n # Wait 3 seconds before going to next\n # time.sleep(1)\n #print(\"Scraped page \" + str(i))\n \n else:\n #print(\"Scraped final page of: \" + league + \" In Year: \" + year)\n break\n\n \n if len(players)!=0:\n df_players = pd.concat(players).reset_index()\n\n df_players.columns = map(str.lower, df_players.columns)\n\n # Clean up dataset\n df_players['season'] = year\n df_players['league'] = league\n\n df_players = df_players.drop(['index','#'], axis=1).reset_index(drop=True)\n \n print(\"Successfully scraped all \" + league + \" goalie data from \" + year + \".\")\n \n df_players = df_players.loc[((df_players.gp!=0) & (~pd.isna(df_players.gp)) & (df_players.gp!=\"0\") & (df_players.gaa!=\"-\"))]\n\n return df_players\n else: print(\"LENGTH 0 ERROR: \" + str(requests.get(url+str(1), timeout = 500)) + \" On League: \" + league + \" In Year: \" + year)",
"def find_home_depot(urls):\n if isinstance(urls, str):\n urls = [urls]\n\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}\n\n for i, url in enumerate(urls):\n urls[i] = (url, None, False)\n\n try:\n r = requests.get(url, headers=headers)\n except Exception as e:\n logger.info(F\"Error in looking for {url}\")\n logger.info(e)\n continue\n response_json = r.json()\n\n # If there is \"errorData\" present, move on\n try:\n a = response_json['DeliveryAvailabilityResponse'][\"errorData\"]\n logger.info(F\"Home Depot returned an error on {url} . Response: {a}\")\n continue\n except:\n pass\n\n try:\n available = response_json['DeliveryAvailabilityResponse']['deliveryAvailability']['availability'][0]['status']\n item_id = int(response_json['DeliveryAvailabilityResponse']['deliveryAvailability']['availability'][0]['itemId'])\n\n urls[i] = (url, item_id, available != \"OOS_ETA_UNAVAILABLE\")\n except KeyError as e:\n logger.error(F\"Problem parsing Home Depot's response on: {url}\")\n logger.error(e)\n\n return urls",
"def get_info(link):\n \n page = requests.get(link, timeout = 500)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n page_string = str(page)\n\n while ((page_string == '<Response [403]>') or (\"evil\" in str(soup.p))): \n print(\"403 Error. re-obtaining string and re-trying.\")\n page = requests.get(link, timeout = 500)\n page_string = str(page)\n soup = BeautifulSoup(page.content, \"html.parser\")\n time.sleep(60)\n\n if soup.find(\"title\") != None:\n player = soup.find(\"title\").string.replace(\" - Elite Prospects\" ,\"\")\n\n else: player = \"-\"\n \n if soup.find(\"div\", {\"class\":\"order-11 ep-list__item ep-list__item--in-card-body ep-list__item--is-compact\"})!=None:\n rights = soup.find(\"div\", {\"class\":\"order-11 ep-list__item ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\"div\", {\"class\":\"col-xs-12 col-18 text-right p-0\"}).find(\"span\").string.split(\"\\n\")[1].split(\"/\")[0].strip()\n status = soup.find(\"div\", {\"class\":\"order-11 ep-list__item ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\"div\", {\"class\":\"col-xs-12 col-18 text-right p-0\"}).find(\"span\").string.split(\"\\n\")[1].split(\"/\")[1].strip()\n else:\n rights = \"-\"\n status = \"-\"\n \n if (soup.find(\"div\", {\"class\":\"col-xs-12 col-17 text-right p-0 ep-text-color--black\"}))!= None:\n if 'dob' in (soup.find(\"div\", {\"class\":\"col-xs-12 col-17 text-right p-0 ep-text-color--black\"})).find(\"a\")['href']:\n dob = soup.find(\"div\", {\"class\":\"col-xs-12 col-17 text-right p-0 ep-text-color--black\"}).find(\"a\")['href'].split(\"dob=\", 1)[1].split(\"&sort\", 1)[0]\n else: \n dob = \"-\"\n\n else:\n dob = \"-\"\n\n if soup.find(\"div\", {\"class\":\"order-6 order-sm-3 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}) != None:\n if \"cm\" in soup.find(\"div\", {\"class\":\"order-6 order-sm-3 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).string:\n height = soup.find(\"div\", {\"class\":\"order-6 order-sm-3 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).string.split(\" / \")[1].split(\"cm\")[0].strip()\n else: \n height = \"-\"\n\n else: \n height = \"-\"\n\n if soup.find(\"div\", {\"class\":\"order-7 order-sm-5 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}) != None:\n if soup.find(\"div\", {\"class\":\"order-7 order-sm-5 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).string.split(\"\\n\")[1].split(\"lbs\")[0].strip() == '- / -':\n weight = \"-\"\n else: \n weight = soup.find(\"div\", {\"class\":\"order-7 order-sm-5 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).string.split(\"\\n\")[1].split(\"lbs\")[0].strip()\n\n else: weight = \"-\"\n\n if soup.find(\"div\", {\"class\":\"order-2 order-sm-4 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ) != None:\n if soup.find(\"div\", {\"class\":\"order-2 order-sm-4 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body 
ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-17 text-right p-0 ep-text-color--black\"}).find(\"a\") != None:\n\n birthplace = soup.find(\"div\", {\"class\":\"order-2 order-sm-4 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-17 text-right p-0 ep-text-color--black\"}).find(\"a\").string.replace(\"\\n\", \"\").strip()\n\n else: \n birthplace = \"-\"\n else: \n birthplace = \"-\"\n\n if soup.find(\"div\", {\"class\":\"order-3 order-sm-6 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}) != None:\n if soup.find(\"div\", {\"class\":\"order-3 order-sm-6 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).find(\"a\") != None:\n nation = soup.find(\"div\", {\"class\":\"order-3 order-sm-6 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).find(\"a\").string.replace(\"\\n\", \"\").strip()\n else: nation = \"-\"\n\n else:\n nation = \"-\"\n\n if soup.find(\"div\", {\"class\":\"order-8 order-sm-7 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}) !=None:\n shoots = soup.find(\"div\", {\"class\":\"order-8 order-sm-7 ep-list__item ep-list__item--col-2 ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0 ep-text-color--black\"}).string.replace(\"\\n\", \"\").strip()\n\n else:\n shoots = \"-\"\n\n if soup.find(\"div\", {\"class\":\"order-12 ep-list__item ep-list__item--in-card-body ep-list__item--is-compact\"}) != None:\n draft = soup.find(\"div\", {\"class\":\"order-12 ep-list__item ep-list__item--in-card-body ep-list__item--is-compact\"}\n ).find(\n \"div\", {\"class\":\"col-xs-12 col-18 text-right p-0\"}).find(\"a\").string.replace(\"\\n\", \"\").strip()\n else: \n draft = \"-\"\n\n #height = np.where(height==\"- / -\", \"-\", height)\n\n #print(player + \" scraped!\")\n return(player, rights, status, dob, height, weight, birthplace, nation, shoots, draft, link)",
"def test_apogee_visit_download(self):\n # make sure the download works correctly\n visit_spectra(dr=13, location=4405, apogee=\"2M19060637+4717296\")\n visit_spectra(dr=14, location=4405, apogee=\"2M19060637+4717296\")\n visit_spectra(\n dr=16, field=\"K06_078+16\", telescope=\"apo25m\", apogee=\"2M19060637+4717296\"\n )\n visit_spectra(\n dr=17, field=\"K06_078+16\", telescope=\"apo25m\", apogee=\"2M19060637+4717296\"\n )\n # assert False is returning if file not found\n self.assertEqual(\n visit_spectra(dr=13, location=4406, apogee=\"2M19060637+4717296\"), False\n )\n self.assertEqual(\n visit_spectra(dr=14, location=4406, apogee=\"2M19060637+4717296\"), False\n )\n self.assertEqual(\n visit_spectra(\n dr=16,\n field=\"K06_078+17\",\n telescope=\"apo25m\",\n apogee=\"2M19060637+4717296\",\n ),\n False,\n )\n self.assertEqual(\n visit_spectra(\n dr=17,\n field=\"K06_078+17\",\n telescope=\"apo25m\",\n apogee=\"2M19060637+4717296\",\n ),\n False,\n )\n # assert error if DR not supported\n self.assertRaises(\n ValueError, visit_spectra, dr=1, location=4406, apogee=\"2M19060637+4717296\"\n )",
"def check_downloaded(self):\n for o in self.order_lst:\n for item in o.get_items():\n mdata = item.get_metadata()\n if 'downloaded' in mdata.keys():\n if str(mdata['downloaded']) == 'True':\n return True\n \n return False",
"def __init__(self, url):\n self.url = url\n self.driver = webdriver.Chrome(\"chromedriver.exe\")\n self.driver.get(\"https://www.osmania.ac.in/res07/20210211.jsp\")\n self.results = {}\n self.roll_nos = []\n self.names = []\n self.sgpa = []\n self.threshold = [245319733180, 245319737120]",
"def get_all_headlines_from_firefox_2(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?as_q=&as_epq=irish+travellers&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=any&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_firefox()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines",
"def proxy_scrape(self):\n print(\"Getting new live proxies\")\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr')[:20]:\n # if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')\n [0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n # return proxies\n # proxies=[]\n print(\"Obtained proxied are as : \", proxies)\n proxy_pool = cycle(proxies)\n proxy_list = [proxy for proxy in proxies]\n return proxy_pool, proxy_list",
"def getMNACGenerator():\n\n # 0 - 89 (something between 80 and 90\n searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?title_1=&title=&field_piece_inventory_number_value=&keys=&field_piece_type_value_i18n[0]=pintura&&&page=%s'\n # 0 - 48, for some reason not all paintings get returned in the main query\n # searchBaseUrl = u'http://www.museunacional.cat/en/advanced-piece-search?field_piece_type_value_i18n[0]=pintura&field_piece_info_content_value[p.%%2019th]=p.%%2019th&field_piece_info_content_value[q.%%2020th]=q.%%2020th&&page=%s'\n htmlparser = HTMLParser.HTMLParser()\n\n foundit=True\n\n for i in range(0, 89):\n searchUrl = searchBaseUrl % (i,)\n print searchUrl\n searchPage = urllib2.urlopen(searchUrl)\n searchPageData = searchPage.read()\n\n searchRegex = u'\\<a href\\=\\\"(\\/en\\/colleccio\\/[^\\\"]+)\\\"\\>Read more\\<\\/a\\>'\n itemmatches = re.finditer(searchRegex, searchPageData)\n urllist = []\n #for match in matches:\n # try:\n # # #bla = unicode(match.group(1), u'utf-8')\n # urllist.append(u'http://www.dulwichpicturegallery.org.uk%s' % (match.group(1),))\n # except UnicodeDecodeError:\n # pywikibot.output(u'Found an url I cannot parse: %s' % (unicode(match.group(1), u'utf-8'),))#\n\n #print len(urllist)\n #urlset = set(urllist)\n #print len(urlset)\n\n\n for itemmatch in itemmatches:\n url = u'http://www.museunacional.cat%s' % (itemmatch.group(1),)\n print url\n\n if url==u'http://adsfasdfasdf':\n foundit=True\n if not foundit:\n continue\n metadata = {}\n\n metadata['collectionqid'] = u'Q861252'\n metadata['collectionshort'] = u'MNAC'\n metadata['locationqid'] = u'Q861252'\n metadata['instanceofqid'] = u'Q3305213'\n \n metadata['url'] = url\n\n itemPage = urllib2.urlopen(url)\n itemPageData = unicode(itemPage.read(), u'utf-8')\n \n #print itemPageEnData\n titleRegex = u'<li class=\"ca first\"><a href=\"/ca/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"ca\" title=\"([^\\\"]+)\">Català</a></li>[\\r\\n\\t\\s]*<li class=\"es\"><a href=\"/es/colleccio/[^\\\"]+\" class=\"language-link\" xml:lang=\"es\" title=\"([^\\\"]+)\">Español</a></li>[\\r\\n\\t\\s]*<li class=\"en last active\"><a href=\"/en/colleccio/[^\\\"]+\" class=\"language-link active\" xml:lang=\"en\" title=\"([^\\\"]+)\">English</a></li>'\n #titleEnRegex = u'<main class=\"main narrow\">[\\r\\n\\t\\s]+<h1>[\\r\\n\\t\\s]*([^<]+)[\\r\\n\\t\\s]*</h1>'\n creatorRegex = u'<div class=\"ds-author-piece\">([^<]+)</div>'\n dateRegex = u'Painting<div class=\"ds-feature\"><p>(\\d\\d\\d\\d)</p></div>' #FIXME: Only matches on real years\n invRegex = u'Inventory number: </div><p>([^<]+)</p>'\n\n # Could also get Dimensions, Materials, Acquisition\n \n matchTitle = re.search(titleRegex, itemPageData)\n if not matchTitle:\n pywikibot.output(u'The title data for this painting is BORKED!')\n continue\n\n #FIXME: Check encoding\n\n metadata['title'] = { u'ca' : htmlparser.unescape(matchTitle.group(1)),\n u'es' : htmlparser.unescape(matchTitle.group(2)),\n u'en' : htmlparser.unescape(matchTitle.group(3)),\n }\n \n #pywikibot.output(metadata.get('title'))\n\n creatorMatch = re.search(creatorRegex, itemPageData)\n if not creatorMatch:\n pywikibot.output(u'The creator data for this painting is BORKED!')\n continue\n\n #FIXME: Add some logic for work after and clean up\n\n name = htmlparser.unescape(creatorMatch.group(1))\n # We need to normalize the name\n if u',' in name:\n (surname, sep, firstname) = name.partition(u',')\n name = u'%s %s' % (firstname.strip(), surname.strip(),)\n metadata['creatorname'] = 
name\n \n metadata['description'] = { u'nl' : u'%s van %s' % (u'schilderij', metadata.get('creatorname'),),\n u'en' : u'%s by %s' % (u'painting', metadata.get('creatorname'),),\n u'ca' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n u'es' : u'%s de %s' % (u'pintura', metadata.get('creatorname'),),\n }\n\n\n invMatch = re.search(invRegex, itemPageData)\n\n if not invMatch:\n pywikibot.output(u'No inventory number found! Skipping')\n continue\n \n metadata['id'] = invMatch.group(1)\n metadata['idpid'] = u'P217'\n\n dateMatch = re.search(dateRegex, itemPageData)\n\n if dateMatch:\n metadata['inception'] = dateMatch.group(1)\n\n yield metadata",
"def pangea_scrape(target, landing_zone=os.path.join('static', 'data')):\n with open(target) as open_download_list:\n for line in open_download_list:\n items = [x.strip() for x in line.split('\\t')]\n if re.match('^\\d{4}-.+', items[0]): # if download list line starts with a date\n link = items[-1]\n download = requests.get(link)\n download_path = os.path.join(landing_zone,\n os.path.basename(link))\n with open(download_path, 'w') as fid:\n print(link, download.status_code)\n fid.write(download.content)",
"def get_goods_id_first(self, origin_url, index):\n\n origin_url = origin_url.replace('https', 'http')\n # first_result = proxy_req(origin_url, 0)\n first_result = basic_req(origin_url, 0, header=self.headers)\n\n if not first_result or len(first_result.find_all('script')) < 2:\n if can_retry(origin_url):\n self.get_goods_id_first(origin_url, index)\n return\n\n wait = first_result.find_all('script')[1].text\n if not '\"title\":\"' in wait:\n return\n title = re.findall(\n '\"title\":\".*\",\"', wait)[0].split('\",\"')[0].split('\":\"')[1]\n if title in self.title2map:\n self.goods_map[index] = self.title2map[title]\n self.url2goods[origin_url] = self.title2map[title]\n\n print(self.title2map[title])\n else:\n print(title)\n # url = re.findall('var url = .*\\'', wait)[0].split('\\'')[1]\n # self.get_goods_second(url, index)",
"def get_robots_content(self):\n logger.info(\"Preparing to download robots.txt content\")\n downloader = HttpDownloader()\n self.content, self.status_code = downloader.get_robots_file(base_url=self.page_url)\n logger.info(\"GET robots.txt status code %s\" % self.status_code)\n return self.content, self.status_code",
"def validate_proxies(proxies, url):\n proxies_working = []\n random.shuffle(proxies)\n for proxy in proxies:\n bad_proxy = is_bad_proxy(proxy, url)\n if not bad_proxy:\n print(proxy, \"APPROVED!\")\n proxies_working.append({'http':proxy})\n if len(proxies_working) == 5:\n break\n elif str(bad_proxy)[0] == '5' and not proxies:\n print('This service is now unavailable (site from scraping is unavailable)')\n if not proxies_working:\n return 0\n return proxies_working",
"def download(self, url_list):\n for url in url_list:\n suitable_found = False\n for ie_var in self._ies:\n # Go to next InfoExtractor if not suitable\n if not ie_var.suitable(url):\n continue\n # Suitable InfoExtractor found\n suitable_found = True\n # Extract information from URL and process it\n ie_var.extract(url)\n # Suitable InfoExtractor had been found; go to next URL\n break\n if not suitable_found:\n self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)\n return self._download_retcode",
"def degruyterCheckSite(url):\n dgtestPhrase = 'Licensed Access'\n dgtestPhrase2 = 'viewbooktoc'\n\n # urltoCheck = input(\"\\n what is the URL? \\n\")\n\n urltoCheck = url\n\n r = requests.get(urltoCheck)\n rResult = r.text\n\n dgoutcome = 0\n if (dgtestPhrase in rResult) and (dgtestPhrase2 in rResult):\n dgoutcome = 1\n\n return dgoutcome",
"def parse_listing(keyword,place):\n\turl = \"https://www.yellowpages.com/search?search_terms={0}&geo_location_terms={1}\".format(keyword,place)\n\tprint(\"retrieving \",url)\n\n\theaders = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n\t\t\t\t'Accept-Encoding':'gzip, deflate, br',\n\t\t\t\t'Accept-Language':'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n\t\t\t\t'Cache-Control':'max-age=0',\n\t\t\t\t'Connection':'keep-alive',\n\t\t\t\t'Host':'www.yellowpages.com',\n\t\t\t\t'Upgrade-Insecure-Requests':'1',\n\t\t\t\t'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n\t\t\t}\n\t# Adding retries\n\tfor retry in range(10):\n\t\ttry:\n\t\t\tresponse = requests.get(url,verify=False, headers = headers )\n\t\t\tprint(\"parsing page\")\n\t\t\tif response.status_code==200:\n\t\t\t\tparser = html.fromstring(response.text)\n\t\t\t\t#making links absolute\n\t\t\t\tbase_url = \"https://www.yellowpages.com\"\n\t\t\t\tparser.make_links_absolute(base_url)\n\n\t\t\t\tXPATH_LISTINGS = \"//div[@class='search-results organic']//div[@class='v-card']\"\n\t\t\t\tlistings = parser.xpath(XPATH_LISTINGS)\n\t\t\t\tscraped_results = []\n\n\t\t\t\tfor results in listings:\n\t\t\t\t\tXPATH_BUSINESS_NAME = \".//a[@class='business-name']//text()\"\n\n\t\t\t\t\tXPATH_WEBSITE = \".//div[@class='info']//div[contains(@class,'info-section')]//div[@class='links']//a[contains(@class,'website')]/@href\"\n\n\t\t\t\t\traw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n\n\t\t\t\t\traw_website = results.xpath(XPATH_WEBSITE)\n\n\n\t\t\t\t\tbusiness_name = ''.join(raw_business_name).strip() if raw_business_name else None\n\n\t\t\t\t\twebsite = ''.join(raw_website).strip() if raw_website else None\n\n\n\n\n\n\t\t\t\t\tbusiness_details = {\n\t\t\t\t\t\t\t\t\t\t'business_name':business_name,\n\n\t\t\t\t\t\t\t\t\t\t'website':website\n\n\t\t\t\t\t}\n\t\t\t\t\tscraped_results.append(business_details)\n\t\t\t\t\tprint(scraped_results)\n\t\t\t\treturn scraped_results\n\n\t\t\telif response.status_code==404:\n\t\t\t\tprint(\"Could not find a location matching\",place)\n\t\t\t\t#no need to retry for non existing page\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Failed to process page\")\n\t\t\t\treturn []\n\n\t\texcept:\n\t\t\tprint(\"Failed to process page\")\n\t\t\treturn []",
"def get_downloadable_data(url_list):\n downloadable_data_list = []\n for url in url_list:\n soup = visit_homepage(url)\n for link in soup.find_all(class_='resource-url-analytics'):\n downloadable_data_list.append(link['href'])\n return downloadable_data_list"
] |
[
"0.5376987",
"0.536853",
"0.5361143",
"0.53058916",
"0.5111806",
"0.51035786",
"0.50865966",
"0.50805527",
"0.50465304",
"0.5031586",
"0.5016632",
"0.5014493",
"0.49960774",
"0.4984048",
"0.4972873",
"0.49680972",
"0.49671388",
"0.4966262",
"0.4927262",
"0.49094528",
"0.4894572",
"0.48938632",
"0.4888881",
"0.4873605",
"0.48658273",
"0.48598835",
"0.48548976",
"0.48546305",
"0.4853532",
"0.48459744"
] |
0.5899962
|
0
|
Plots the average estimated response and average observed response for groups of a specified variable. A bar chart to display the count within each group is also provided.
|
import matplotlib.pyplot as plt
import pandas as pd

def avg_response(df, x, y_obs, y_est, save=False, show=True):
    """Plot the average observed and estimated response per group of `x`,
    with a secondary bar chart of the observation count in each group."""
    fig, ax1 = plt.subplots(figsize=(15, 15))
    ax2 = ax1.twinx()
    x_name = x
    # Bucket the grouping variable: treat integer columns as categories, bin floats.
    if pd.api.types.is_integer_dtype(df[x]):
        x = df[x].astype("category")
    elif pd.api.types.is_float_dtype(df[x]):
        x = pd.cut(df[x], bins=10)
    # Aggregate per group; columns become a MultiIndex of (column, statistic).
    df_grouped = df.groupby(x)[[y_obs, y_est]].agg(["mean", "sem", "count"])
    x_vals = list(range(len(df_grouped)))
    # Estimated response with standard-error bars.
    ax1.errorbar(x_vals, df_grouped[(y_est, "mean")],
                 yerr=df_grouped[(y_est, "sem")], fmt='-', marker='o',
                 color='r', mec='black', ms=10, mew=2, linewidth=4,
                 capsize=10, elinewidth=2, label=y_est)
    # Observed response.
    ax1.plot(x_vals, df_grouped[(y_obs, "mean")], '-', label=y_obs, marker='o',
             color='g', mec='black', ms=10, mew=2, linewidth=4)
    # Observation count per group on the secondary axis.
    counts = df_grouped[(y_obs, "count")]
    ax2.bar(x_vals, counts, color='DarkSlateGray', alpha=0.25)
    ax1.set_xlim(x_vals[0] - 0.2, x_vals[-1] + 1)
    x_levels = list(counts.index)
    ax1.set_xticks(x_vals)
    ax1.set_xticklabels(x_levels, rotation=45)
    ax1.grid(False)
    ax2.grid(False)
    font_size = 20
    ax1.set_xlabel(x_name, fontsize=font_size)
    ax1.set_ylabel(y_obs, fontsize=font_size)
    ax2.set_ylabel("count", fontsize=font_size)
    plt.title("Average {y} for groups of {x}".format(x=x_name, y=y_obs),
              fontsize=font_size + 5)
    ax1.legend(fontsize=font_size - 2)  # labels come from the plotted artists
    if save:
        fig.savefig("/home/edward/work/repos/prometheus/python/plots/avg_response/{}.png".
                    format(x_name), bbox_inches='tight')
    if show:
        plt.show()
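
A minimal usage sketch for avg_response; the DataFrame and the column names ("age", "claims_observed", "claims_predicted") are hypothetical and only illustrate the expected call.

import numpy as np

# Hypothetical data: one float grouping variable plus observed and estimated responses.
rng = np.random.default_rng(0)
df = pd.DataFrame({
    "age": rng.integers(18, 70, size=500).astype(float),
    "claims_observed": rng.poisson(2.0, size=500).astype(float),
})
df["claims_predicted"] = df["claims_observed"] * rng.normal(1.0, 0.1, size=500)

# The float column is binned into 10 groups internally; the plot is shown, not saved.
avg_response(df, "age", "claims_observed", "claims_predicted", save=False, show=True)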
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/survived_\" + str(var) + \".png\", bbox_inches=\"tight\")\n\n plt.show()",
"def plot_average_reponse(data,prj_info,pp=PdfPages(\"exploration.pdf\"),TMP=1234,bins=20):\r\n #Copy data\r\n data = data.copy()\r\n #Slice data\r\n data = data.sample(n = min(10000,data.shape[0]),random_state=1234)\r\n #Colnames\r\n var_to_plot = list(set(data.columns.values)-set(prj_info['PRJ_COLUMN'].values()))\r\n\r\n #Loop figure\r\n pbar = ProgressBar()\r\n for var in pbar(var_to_plot):\r\n #Bins\r\n if data[var].dtype.name != \"category\" and len(data[var].unique())>bins:\r\n data[\"var_new\"] = pd.qcut(data[var], bins, duplicates='drop')\r\n else:\r\n data[\"var_new\"] = data[var].astype(str)\r\n data_plot = data.groupby(\"var_new\").agg({prj_info['PRJ_COLUMN'][\"RESPONSE\"]: 'mean', \"var_new\": 'count'})\r\n\r\n #Table\r\n data_plot = data.groupby(\"var_new\").agg({prj_info['PRJ_COLUMN'][\"RESPONSE\"]: 'mean', \"var_new\": 'count'})\r\n\r\n #Build plot\r\n f, ax = plt.subplots()\r\n ax2 =ax.twinx()\r\n sns.barplot(x=data_plot.index.tolist(), y=\"var_new\",data=data_plot,ax=ax, color=\"dodgerblue\")\r\n sns.pointplot(x=data_plot.index.tolist(), y=prj_info['PRJ_COLUMN'][\"RESPONSE\"], data=data_plot,ax=ax2, color=\"chartreuse\")\r\n ax.set_xlabel(var)\r\n ax.set_ylabel(var)\r\n ax2.set_ylabel(prj_info['PRJ_COLUMN'][\"RESPONSE\"])\r\n plt.title(\"Average reponse by \" + var)\r\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=60)\r\n \r\n pp.savefig(f)\r\n\r\n return None",
"def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()",
"def PlotMA(df, show_plot=True):\n # Ensure 'Study' is in header.\n # TODO: Add code.\n # Obtain monthly means.\n df_ave = stats.MonthlyMean(df)\n # Get list of variables.\n var_list = set(df_ave.columns.get_level_values('Part B'))\n # Determine number of axes in the figure.\n n_var = len(var_list)\n n_row = np.int(np.around(np.sqrt(n_var)))\n if n_row**2 >= n_var:\n n_col = n_row\n else:\n n_col = n_row + 1\n fig, ax = plt.subplots(nrows=n_row, ncols=n_col, figsize=(16, 9))\n # Remove extra axes, if necessary.\n if n_row * n_col != n_var:\n n_empty = n_row * n_col - n_var\n for k in range(n_empty):\n ax[-1, -(1+k)].remove()\n # Initialize variables for updating axes.\n cur_row = 0\n cur_col = 0\n var_width = 0.9\n idx = np.arange(12)\n x_min = 0\n x_max = 12\n # Plot each variable.\n for var in var_list:\n df_var = df_ave.xs(var, level='Part B', axis=1)\n # Initialize parameters for current axis.\n y_min = np.min(df_var.values) * 0.95\n y_max = np.max(df_var.values) * 1.05\n col_var = df_var.columns\n bar_width = var_width / len(col_var)\n # Select current axis.\n if n_row == 1 and n_col == 1:\n ax_cur = ax\n elif n_row == 1:\n ax_cur = ax[cur_col]\n else:\n ax_cur = ax[cur_row, cur_col]\n # Plot study results for each variable.\n for c in col_var:\n idx_c = idx + 0.05 + col_var.get_loc(c) * bar_width\n val_c = df_var[c].values\n lab_c = '{} {}'.format(*c[:2])\n ax_cur.bar(idx_c, val_c, bar_width, label=lab_c, align='edge')\n # Set plot title.\n ax_cur.set_title('Monthly Averages of {}'.format(var))\n # Modify x-axis and y-axis.\n ax_cur.set_xticks(idx + 0.5)\n tick_labels = list(df_ave.index)\n ax_cur.set_xticklabels(tick_labels)\n label_c = set(df_var.columns.get_level_values('Part C'))\n _unit = df_var.columns.get_level_values('Units')\n _dtyp = df_var.columns.get_level_values('Data Type')\n unit_d = list(zip(_unit, _dtyp))\n combo_unit = [' '.join(i) for i in unit_d]\n label_u = set(combo_unit)\n ax_cur.set_ylabel('{} ({})'.format(r'/'.join(label_c),\n r'/'.join(label_u)))\n ax_cur.set_ylim(bottom=y_min, top=y_max)\n ax_cur.set_xlim(left=x_min, right=x_max)\n ax_cur.set_xlabel('Month')\n ax_cur.spines['right'].set_visible(False)\n ax_cur.spines['top'].set_visible(False)\n # Set legend.\n ax_cur.legend(title='Study, Part A')\n # Update current axis.\n cur_col += 1\n if cur_col >= n_col:\n cur_col = 0\n cur_row += 1\n # Adjust layout.\n plt.tight_layout()\n # Add figure notes.\n t = PlotChartNotes()\n if n_row * n_col != n_var:\n x_pos = (1 - n_empty / n_col) * 1.05\n y_pos = (1 / n_row) * 0.95\n plt.figtext(x_pos, y_pos, t, ha='left', va='top', wrap=True)\n else:\n plt.figtext(0.05, 0, t, ha='left', va='bottom', wrap=True)\n plt.subplots_adjust(bottom=0.2)\n # Show plot, if requested.\n if show_plot:\n plt.show()\n # Return figure and axes.\n return fig, ax",
"def create_grouped_bar_chart(stats: dict[str, list[int]]):\n\n figure, axes = plot.subplots()\n\n labels = [str(e) for e in CauseOfDeath]\n x = numpy.arange(len(labels))\n\n bar_width = 0.15\n max_value = 0\n\n rects = []\n i = 0\n for label, values in stats.items():\n max_value = max(max_value, max(values))\n rects.append(axes.bar(x + (i * bar_width), values, bar_width, label=label))\n i = i + 1\n\n axes.set_title(\"Deaths arranged by cause and animal type\")\n axes.set_ylabel(\"Amount\")\n axes.set_xticks(x)\n axes.set_xticklabels(labels)\n axes.legend()\n\n for rect in rects:\n attach_text_labels(rect, axes)\n\n figure.tight_layout()\n return figure",
"def Grouped_Bar_Plot(data,grouping_variable,headers='hide',\r\n figsize=(15,10),order=None,value_max=100.,sp_keywords={},\r\n Grid=None):\r\n\r\n grouping_variables=grouping_variable\r\n data=data.T\r\n\r\n if order is None:\r\n order=np.unique(grouping_variables)\r\n\r\n\r\n Grouped_data= data.groupby(grouping_variables,axis=1)\r\n\r\n\r\n\r\n\r\n ngroups =len(order)\r\n index_of_subplot=dict(zip(order,range(ngroups)))\r\n\r\n\r\n## make grid\r\n if Grid is None:\r\n\r\n fig = plt.figure(figsize=figsize)\r\n Grid=gridspec.GridSpec(1,1)[0]\r\n\r\n gs =gridspec.GridSpecFromSubplotSpec(1,ngroups,subplot_spec=Grid ,\r\n width_ratios= Grouped_data.size().loc[order].values)\r\n axes=[plt.subplot(gs[0])]\r\n\r\n for i in range(1,ngroups):\r\n axes.append(plt.subplot(gs[i],sharey=axes[0]))\r\n\r\n\r\n #gridspec_kw=dict(width_ratios= Grouped_data.size().loc[order].values ) # adapt widths\r\n\r\n #fig, axes = plt.subplots(1,ngroups, sharex=False, sharey=True, figsize=figsize,gridspec_kw=gridspec_kw)\r\n\r\n\r\n for group_name in order:\r\n\r\n data_group = Grouped_data.get_group(group_name)\r\n ax=axes[index_of_subplot[group_name]]\r\n\r\n BarPlot(data_group.T,value_max=value_max,headers=headers,ax=ax,**sp_keywords)\r\n plt.setp(ax.get_yticklabels(), visible=False)\r\n plt.setp(ax.get_legend(),visible=False)\r\n ax.set_title(group_name)\r\n\r\n\r\n\r\n plt.setp(axes[-1].get_legend(),visible=True)\r\n plt.setp(axes[0].get_yticklabels(), visible=True)\r\n plt.tight_layout(pad=0.8)\r\n return axes",
"def avg_response_report(df, var_list, y_obs, y_est, file):\n page = PdfPages(file)\n for var in var_list:\n avg_response(df, var, y_obs, y_est, show=False)\n page.savefig()\n page.close()",
"def visualize_outliers(df, var):\n import pandas as pd\n import numpy as np\n import matplotlib.pyplot as plt\n \n num_var = df.groupby(var)[var].count() \n total = np.float(len(df))\n \n var_perc = num_var / total \n \n var_perc.plot.bar()\n plt.ylabel('Percentage of observations per label')\n plt.title(var)\n \n return plt.show()",
"def plotBarChart(resultConfirmed, resultDeath, resultVaccinated):\n fig, ax = plt.subplots(3)\n\n ax[0].plot(resultConfirmed['Date'], resultConfirmed['Confirmed Cases'])\n ax[0].title.set_text('Confirmed Cases')\n \n ax[1].plot(resultDeath['Date'], resultDeath['Death Cases'])\n ax[1].title.set_text('Death Cases')\n \n ax[2].plot(resultVaccinated['Date'], resultVaccinated['Vaccinated Person'])\n ax[2].title.set_text('Vaccinated Cases')\n fig.tight_layout()\n plt.show()",
"def bar_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if distinct_enum_X == 1:\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 20:\n score = 3\n elif distinct_enum_X > 20:\n score = 40 / distinct_enum_X\n return score",
"def plot_mean_convergence(self, variables: List[str] = None, **adj_args):\n if self.iteration_count() < 2 or not self.save_all_iterations:\n raise ValueError(\"There is only one iteration.\")\n\n num_vars = self._get_num_vars()\n if variables is None:\n variables = num_vars\n elif any([v not in num_vars for v in variables]):\n raise ValueError(\"variables were either not numeric or not imputed.\")\n\n mean_dict = self.get_means(variables=variables)\n\n import matplotlib.pyplot as plt\n from matplotlib import gridspec\n\n plots = len(mean_dict)\n plotrows, plotcols = int(np.ceil(np.sqrt(plots))), int(\n np.ceil(plots / np.ceil(np.sqrt(plots)))\n )\n gs = gridspec.GridSpec(plotrows, plotcols)\n fig, ax = plt.subplots(plotrows, plotcols, squeeze=False)\n\n for v in range(plots):\n axr, axc = next(iter(gs[v].rowspan)), next(iter(gs[v].colspan))\n var = list(mean_dict)[v]\n ax[axr, axc].plot(list(mean_dict[var].values()))\n ax[axr, axc].set_title(var)\n ax[axr, axc].set_xlabel(\"Iteration\")\n ax[axr, axc].set_ylabel(\"mean\")\n plt.subplots_adjust(**adj_args)",
"def plot_multi_bars_means_stds_sats(image_list, ax): \n N = len(image_list)\n means = [compute_means(image) for image in image_list]\n stds = [compute_stds(image) for image in image_list]\n saturations = [(1-compute_saturations(image)) for image in image_list]\n \n mean_of_every_feature = []\n for idx in range(len(means)):\n mean_of_every_feature.append((means[idx] + stds[idx] + saturations[idx])/3)\n \n \n ## necessary variables\n ind = np.arange(N) # the x locations for the groups\n width = 0.3 # the width of the bars\n #the bars\n rects1 = ax.bar(ind, means, width, color='red')\n rects2 = ax.bar(ind+width, stds, width, color='green')\n rects3 = ax.bar(ind+2*width, saturations, width, color='blue')\n # axes and labels\n ax.set_xlim(-0.5*width,len(ind)+0.5*width)\n ax.set_ylim(0,1)# this is customized for optimal visualization\n# ax.set_xlabel(r'$Methods \\ in$')\n \n #ax.set_title('Scores by group and gender')\n xTickMarks = [r'$[9]$', \n r'$[23]$', \n r'$[17]$', \n r'$[18]$', \n r'$[24]$', \n r'RC']\n ax.set_xticks(ind+width)\n xtickNames = ax.set_xticklabels(xTickMarks)\n plt.setp(xtickNames, rotation=0)\n ## add a legend\n ax.legend( (rects1[0], rects2[0], rects3[0]), (r'$\\mu_{\\mathrm{diff}}$', r'$\\sigma_{\\mathrm{diff}}$', r'$\\lambda$'), \n loc=1, ncol=3, handlelength=0.8, borderpad=0.2, labelspacing=0.0)\n\n return mean_of_every_feature",
"def PlotAverageEstimate( measure='DM', ax=None, scenario={}, errorstart=0, **kwargs ):\n\n if ax is None:\n fig, ax = plt.subplots()\n\n avg, dev = [], []\n for iz, (redshift, color) in enumerate( zip(redshift_bins, Rainbow(redshift_bins)) ):\n P, x = GetLikelihood_Full( measure=measure, redshift=redshift, **scenario )\n a, s = Likelihood2Expectation( P=P, x=x, density=True, log=True )\n avg.append(a)\n dev.append(s)\n ## plot arrorbars, starting at the indicated position\n erb = ax.errorbar( redshift_bins[errorstart:], avg[errorstart:], np.array(dev).reshape([len(avg),2])[errorstart:].transpose(), **kwargs ) \n ## draw the full line with the same kwargs\n kwargs_ = kwargs.copy()\n ## however, remove those kwargs that do not work with plt.plot\n for key in ['errorevery', 'label']:\n kwargs_.pop( key, 0 )\n ## if color is not set, ensure that same color is used as for errorbar\n if 'color' not in kwargs:\n lines, collection = erb.get_children()\n color = lines.get_color()\n kwargs_['color'] = color\n ax.plot( redshift_bins, avg, **kwargs_ )\n# ax.errorbar( redshift_bins, avg, avg - 10**(np.log10(avg)-dev), **kwargs ) \n ax.set_yscale('log')\n ax.set_xlabel('redshift', fontdict={'size':16 })\n ax.set_ylabel('%s / %s' % (label_measure[measure], units[measure]), fontdict={'size':16 } )",
"def compare_plot_outcome(data_lastDV):\n # TODO: These should be box plots, not bar plots\n col_names = data_lastDV.columns.values.tolist() # get the columns' names\n outcome = col_names.pop() # remove the last item in the list\n\n dimension = 2 # TODO: figure out better way to organize plots by location\n\n fig = plt.figure()\n i = 1\n for cond in col_names:\n ax = fig.add_subplot(len(col_names)/dimension, dimension, i)\n #df_compare = pd.concat([data.groupby(cond)[cond].count(), data.groupby(cond)[outcome].mean()], axis=1) # displays num helpers selected in each condition\n df_compare = data_lastDV.groupby(cond)[outcome].mean() # displays num helpers selected in each condition\n ax = df_compare.plot(kind='bar', title=cond)\n ax.set_xlabel(cond)\n ax.set_ylabel(\"mean \" + outcome)\n i += 1\n fig.tight_layout()\n plt.show()",
"def plot_confidence_interval_for_variable (model, X, y, variable):\n\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n X_ds_new = X.copy()\n X_ds_new['actual'] = y\n X_ds_new['pred'] = np.mean(preds, axis=1)\n X_ds_new['pred_std'] = np.std(preds, axis=1)\n\n X_ds_grp = X_ds_new.groupby(variable)['actual', 'pred', 'pred_std'].agg('mean')\n X_ds_grp['count'] = X_ds_new[variable].value_counts()\n\n print (f'Average Predicted value and Std Dev by : {variable}')\n display(X_ds_grp)\n print ('')\n print (f'Distribution of Predicted value by : {variable}')\n sns.catplot(x=variable, y='pred', data=X_ds_new, kind='box')\n plt.show()",
"def plot_variables(labels, plot, data):\n # Create individual figures\n fig = subplots.make_subplots(rows=1, cols=1)\n for var in labels:\n if plot == 0:\n counts = data[var].value_counts()\n fig.append_trace(go.Bar(x=counts, y=counts.index, orientation='h'), 1, 1)\n elif plot == 1:\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][0], 1, 1)\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][1], 1, 1)\n elif plot == 2:\n fig.add_trace(go.Box(x=list(data[data[\"Score\"] == \"good\"][var]), name=\"Good\", hoverinfo=\"x\", marker_color='mediumturquoise'))\n fig.add_trace(go.Box(x=list(data[data[\"Score\"] == \"bad\"][var]), name=\"Bad\", hoverinfo=\"x\", marker_color='darkorange'))\n else:\n raise ValueError(\"plot number must be 0, 1, or 2\")\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n if plot == 0:\n visibility = [i == j for j in range(len(labels))]\n else:\n visibility = [j//2 == i for j in range(2*len(labels))]\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility},\n {'title': label}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons\n )\n ])\n # Setup layout\n if plot == 0:\n fig['layout']['title'] = \"Distribution of categorical and discrete variables:\"\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.7)\n elif plot == 1:\n fig['layout']['title'] = \"Distribution of continuous variables:\"\n fig.update_traces(marker_color='rgb(112, 125, 188)', opacity=0.8)\n elif plot == 2:\n fig['layout']['title'] = \"Boxplot of continuous variables by score:\"\n fig['layout']['showlegend'] = False\n fig['layout']['updatemenus'] = updatemenus\n iplot(fig, config={\"displayModeBar\": False})",
"def plotVarGroup(self, var, series, groups=None, labels=None, postfix=\"\", logy=True, fixedrange=False):\n # split the variable name\n varname = var.split(\"_\")\n\n # create the separate dataframes to plot from the provided groups\n # Define some labels if we have groups but no provided labels\n dfs = None\n if groups:\n dfs = [series.loc[g] for g in groups]\n if not labels or len(labels) != len(groups):\n labels = [\"Group %s\" % (i+1) for i in xrange(len(groups)-1)]\n labels.append(\"Rest\")\n else:\n dfs = [series]\n\n # Get right number of colors, and reverse them so that mediumpurple is \n # used for the bulk of the chips (assumed to be the last group)\n colors = (self.colorlist[:len(dfs)])\n colors.reverse()\n \n # Make the histogram\n # Get the preferred binning and check whether all values fall within that range \n # If not, let pyplot pick the range. \n if varname[0] in cutinfo11:\n nbins = cutinfo11[varname[0]][2]\n xmin = cutinfo11[varname[0]][3]\n xmax = cutinfo11[varname[0]][4]\n series_min = series.min()\n series_max = series.max()\n if fixedrange or (series_min > xmin and series_max < xmax):\n # Use the predefined ranges\n print fixedrange, xmin, xmax\n ax = plt.hist(dfs, bins=nbins, range=[xmin, xmax], stacked=True, \n color=colors, label=labels, log=logy)\n else:\n print \"auto scaling\"\n # Only use the number of bins from cutinfo11\n ax = plt.hist(dfs, bins=nbins, stacked=True, \n color=colors, label=labels, log=logy)\n else:\n # No info available, let pyplot deal with things, but use 20 bins\n ax = plt.hist(dfs, bins=20, stacked=True, \n color=colors, label=labels, log=logy)\n\n # Set the axis titles (use cutinfo11 if available)\n if varname[0] in cutinfo11:\n if len(varname) == 1:\n plt.xlabel(cutinfo11[varname[0]][0], \n fontsize=self.labelsize)\n else:\n plt.xlabel(\"%s ; %s\" % (cutinfo11[varname[0]][0], varname[1]), \n fontsize=self.labelsize)\n else:\n plt.xlabel(varname[0], \n fontsize=self.labelsize)\n plt.ylabel(\"Number of chips\", fontsize=self.labelsize)\n\n # set margins and format axis labels\n x0, x1, y0, y1 = plt.axis()\n if logy:\n plt.axis((x0, x1,\n 0.5, y1*10))\n else:\n plt.axis((x0, x1,\n 0.5, y1*(1+0.2)))\n ax = plt.gca()\n ax.tick_params(labelsize=self.ticklabelsize)\n plt.gcf().subplots_adjust(bottom=0.12)\n\n # Add mean and std info\n # Only use info on good chips, should be the last group in the list\n mean = dfs[-1].mean() #series.mean()\n std = dfs[-1].std() #series.std()\n plt.figtext(0.4, 0.92,\n \"Mean: %.3g Std/Mean: %.3g\\nStd: %.3g\"%(mean, std/mean, std),\n fontsize=self.ticklabelsize)\n\n # Add cut lines if we have info\n if self.cutfile != None and varname[0] in cutinfo11:\n plt.axvline(x=self.cuts[varname[0]][2], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varname[0]][3], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varname[0]][0], linestyle='solid', linewidth=2, color='dimgrey')\n plt.axvline(x=self.cuts[varname[0]][1], linestyle='solid', linewidth=2, color='dimgrey')\n\n # Add legend if we have labels\n if labels:\n plt.legend(loc='best', ncol=2)\n\n # Save figure\n plt.savefig(\"%s/%s%s.pdf\" % (self.outputdir, var, postfix))\n plt.clf()",
"def barh_plotter(data: pd.DataFrame, variable: str):\n fig, axs = plt.subplots(2,5, gridspec_kw={'wspace': 1, 'hspace': 0.2},\n figsize=(60, 40), sharex = False)\n\n for ax, dta in zip(axs.flatten(), data.values()) :\n ax.barh(dta['club'], dta[f'{variable}'])\n ax.set_xlabel(f'{variable}', fontsize=25)\n \n \n for ax, dta in zip(axs.flatten(), data.keys()):\n ax.set_title(dta, fontsize=30)\n\n return plt.show()",
"def visualize_tgt_by_categorical(df, var, target):\n import seaborn as sns\n import matplotlib.pyplot as plt\n import pandas as pd\n \n plt.figure(figsize=(10,5))\n \n grouped_values = df.groupby(var)[target].mean().sort_values(ascending = False).reset_index()\n\n sns.set(style = 'white')\n sns.barplot(x = var, y = target, data = grouped_values, palette = sns.color_palette(\"RdBu\", n_colors = 7))\n\n return plt.show()",
"def plot_results(t_val, mood):\r\n N = 8\r\n theta = np.linspace(0.0, 2 * np.pi , N, endpoint=False)\r\n the_stats = [t_val['number_words'], t_val['average_character_length'], \r\n t_val['signs'], t_val['multiple_signs'], t_val['question'],\r\n t_val['exclamation'], t_val['name'], mood] \r\n \r\n width = np.pi / N \r\n\r\n plt.figure()\r\n \r\n handle = plt.subplot(111, polar=True)\r\n handle.set_xticklabels(['Word', 'AvrChar', 'Signs', '2Signs', '?', '!', 'name', 'mood'])\r\n \r\n handle.bar(theta, the_stats, width=width, bottom=1.0)\r\n \r\n plt.show()",
"def plot_variables(labels, plot, data):\n # Create individual figures\n fig = subplots.make_subplots(rows=1, cols=1)\n for var in labels:\n if plot == 0:\n counts = data[var].value_counts()\n fig.append_trace(go.Bar(x=counts, y=counts.index, orientation='h'), 1, 1)\n elif plot == 1:\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][0], 1, 1)\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][1], 1, 1)\n else:\n raise ValueError(\"plot number must be 0, 1\")\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n if plot == 0:\n visibility = [i == j for j in range(len(labels))]\n else:\n visibility = [j//2 == i for j in range(2*len(labels))]\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility},\n {'title': label}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons\n )\n ])\n # Setup layout\n if plot == 0:\n fig['layout']['title'] = \"Distribution of categorical and discrete variables:\"\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.7)\n elif plot == 1:\n fig['layout']['title'] = \"Distribution of continuous variables:\"\n fig.update_traces(marker_color='rgb(112, 125, 188)', opacity=0.8)\n elif plot == 2:\n fig['layout']['title'] = \"Boxplot of continuous variables by score:\"\n fig['layout']['showlegend'] = False\n fig['layout']['updatemenus'] = updatemenus\n iplot(fig, config={\"displayModeBar\": False})",
"def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3))\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.50, wspace=0.05)\n\n bar_width = 0.35\n for ax, (key, dat) in zip(axs.flatten(), df):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_title(f\"Effect size = {key}\")\n ax.set_xlabel(f\"Group Size\")\n ax.legend()\n\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n\n fig.suptitle(dict[\"title\"], y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()",
"def plot_2():\n plot_2 = read_data_csv('plot_2_data.csv')\n x_axis=\"Variable\"\n y_axis=\"Average Value\"\n title=\"Page Interactions for Cancelled and Active Users\"\n\n fig = go.Figure(\n [\n go.Bar(\n x=plot_2[plot_2['Churn'] ==1].variable,\n y=plot_2[plot_2['Churn'] ==1].value,\n text=plot_2[plot_2['Churn'] ==1].variable,\n ),\n go.Bar(\n x=plot_2[plot_2['Churn'] ==1].variable,\n y=plot_2[plot_2['Churn'] ==0].value,\n text=plot_2[plot_2['Churn'] ==1].variable,\n )\n\n ]\n )\n fig.update_layout(\n barmode='group',\n title=go.layout.Title(text=title, x=0.5),\n xaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=x_axis)),\n yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text=y_axis)),\n )\n\n return fig",
"def bar_grapgh(dictionary, variable):\r\n plt.clf() # Deletes the previous plot \r\n plt.hist(dictionary[variable])\r\n plt.title('Histogram of ' + variable)\r\n plt.xlabel(variable)\r\n plt.ylabel('Frequency')\r\n plt.savefig(variable)",
"def make_group_plot(args):\n directory = args.directory\n prefix = args.prefix\n buckets = args.buckets \n\n # Collect all the results and create placeholder for results.\n all_files = glob.glob(directory + \"/\" + prefix + \"*.csv\")\n df = pd.concat((pd.read_csv(f) for f in all_files), axis=1)\n df.columns = all_files\n results_raw = df.as_matrix()\n num_bins = int(np.ceil(results_raw.shape[0]/buckets))\n results_binned = np.zeros((results_raw.shape[1], num_bins))\n\n # Bin the results.\n for run in range(results_raw.shape[1]):\n for bin_idx in range(num_bins):\n results_binned[run, bin_idx] = (np.mean(results_raw[\n int(bin_idx*buckets):int(bin_idx*buckets+buckets), run]))\n\n # Build the plot.\n fig, ax = plt.subplots(figsize=(args.figSizeX, args.figSizeY))\n sns.tsplot(data = results_binned, ax=ax, ci=[68, 95], color=\"m\")\n\n # Save the plot.\n ax.set_title(prefix + ' -- Average Binned Return', fontsize=18)\n ax.set_xlabel('Bin', fontsize=18)\n ax.set_ylabel('Average Return', fontsize=18)\n plt.tick_params(axis='both', which='major', labelsize=18)\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n plt.savefig(os.path.join(directory, prefix+'_groupfig.png'), \n bbox_inches='tight')\n \n # Return binned results for group figure.\n return results_binned",
"def show_graph(d:dict):\n x = []\n y = []\n for key, value in d.items():\n x.append(str(key))\n y.append(value)\n\n x_pos = [i for i, _ in enumerate(x)]\n plt.figure()\n plt.bar(x_pos, y, color='green')\n plt.xlabel(\"Size\")\n plt.ylabel(\"Number of images\")\n plt.title(\"Count by size\")\n plt.xticks(x_pos, x)",
"def plot_outcomes(outcomes):\n outcomes = zip(*outcomes)\n players = outcomes[0]\n results = outcomes[1]\n numPlayers = len(players)\n \n x = range(numPlayers)\n y = results\n f = pylab.figure()\n\n ax = f.add_axes([0.1, 0.2, 0.8, 0.7])\n ax.bar(x, y, align='center')\n ax.set_xticks(x)\n ax.set_xticklabels(players, rotation = 15)\n \n pylab.title(\"How did everyone do?\")\n pylab.ylabel(\"Number of Wins\")\n\n f.show()",
"def plot_trends(group, country=\"US\", state=None, place=None, predictive_method=\"ARIMA\"):\n print(f\"* Plotting Google Trends of `{group}` for {country} - {state or 'All'}\")\n group_queries = get_group_queries(group, only_root=True)\n\n n_queries = len(group_queries)\n n_cols = 3\n n_rows = int(n_queries / n_cols) + (1 if n_queries % n_cols else 0)\n\n # Annotations\n annotations = []\n\n # Initialize figure with subplots\n subplot_titles = [\"%s...\" % t[:22] if len(t) >= 22 else t for t in group_queries]\n fig = make_subplots(\n rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n shared_yaxes=True,\n print_grid=True\n )\n\n # Marked Dates\n covid_start_date = COVID_START_DATE\n reopen_date = REOPEN_DATE\n reopen_date_minus_1 = REOPEN_DATE_MINUS_1\n data_start_date = DATA_START_DATE\n data_end_date = DATA_END_DATE\n\n # Figure variable\n baseline = 0\n value_range = [0, 100]\n\n # Model params\n model_params = []\n\n for idx, query in enumerate(group_queries):\n row = int(idx / n_cols) + 1\n col = idx % n_cols + 1\n showlegend = idx == 0\n\n query_file_path = get_data_filename(group, query, country=country, state=state, full=True)\n df = pd.read_csv(query_file_path, parse_dates=True)\n count = df[\"date\"].count()\n\n # ARIMA Model\n if query in df.columns:\n print(\"Query: \", query)\n # get_arima_params(df[query])\n df, model = arima_predict(df, from_date=PREDICT_FROM_DATE, value_col=query)\n params = model.get_params()\n model_params.append([query, str(params[\"order\"])])\n # return False\n \n # No data\n if count == 0:\n continue\n\n # Process\n stayhome_order_date = place.get(\"ClosedFrom\") if place else SOCIAL_DISTANCE_ORDER_DATE\n\n df = df[(df[\"date\"] >= data_start_date) & (df[\"date\"] <= data_end_date)]\n df_before = df[(df[\"date\"] <= reopen_date)]\n df_after = df[(df[\"date\"] >= reopen_date_minus_1)]\n df_prediction = df[df[\"is_predicted\"] == 1]\n\n # Normalize\n if config.TRENDS_APPLY_NORMALIZATION:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query].apply(lambda x: (x - baseline) / max_value)\n df_before[\"value\"] = df_before[query].apply(lambda x: (x - baseline) / max_value)\n df_after[\"value\"] = df_after[query].apply(lambda x: (x - baseline) / max_value)\n baseline = 0\n value_range = [-1, 1]\n else:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query]\n df_before[\"value\"] = df_before[query]\n df_after[\"value\"] = df_after[query]\n\n # Compute difference\n query_text = query.split(\"+\")[0].strip() + \" + ...\" if \"+\" in query else query\n actual_mean, actual_meanCI95min, actual_meanCI95max = mean_confidence_interval(df_prediction[query])\n predict_mean = df_prediction[\"prediction\"].mean()\n diff = round(100 * (actual_mean - predict_mean) / predict_mean, 1)\n diffCI95min = round(100 * (actual_meanCI95min - predict_mean) / predict_mean, 1)\n diffCI95max = round(100 * (actual_meanCI95max - predict_mean) / predict_mean, 1)\n x_date = list(df['date'])[int(df[\"date\"].count()/2)]\n diff_annot = go.layout.Annotation(\n text=f'<b>{query_text}</b><br><sub><b style=\"color:{config.COLOR_UPTREND if diff >= 0 else config.COLOR_DOWNTREND}\">{diff}%</b>; 95%CI, [{diffCI95min}%, {diffCI95max}%]</sub>',\n showarrow=False, xanchor=\"center\", yanchor=\"top\", \n x=x_date,\n y=0.0,\n xshift=0,\n yshift=-5,\n xref=f\"x{'' if idx == 0 else idx + 1}\",\n yref=f\"y{'' if idx == 0 else idx + 1}\"\n )\n annotations.append(diff_annot)\n\n # Lockdown period\n max_y = 
max(df[query].max(), abs(df[query].min()))\n min_y = -max_y\n shape_lockdown = go.layout.Shape(**{\"type\": \"rect\",\"y0\":100,\"y1\": -100,\"x0\":COVID_START_DATE, \n \"x1\":REOPEN_DATE,\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"fillcolor\":\"#eeeeee\", \"line\":dict(width=0), \"line_width\": 0})\n fig.add_shape(shape_lockdown, row=row, col=col)\n\n # Horizontal line \n shape = go.layout.Shape(**{\"type\": \"line\",\"y0\":baseline,\"y1\": baseline,\"x0\":str(df[\"date\"].values[0]), \n \"x1\":str(df[\"date\"].values[-1]),\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"line\": {\"color\": \"rgb(200, 200, 200)\",\"width\": 1.5}})\n fig.add_shape(shape, row=row, col=col)\n\n # Stay home order\n if stayhome_order_date:\n shape_stayhome_order = go.layout.Shape(**{\"type\": \"line\",\"y0\":-0.25,\"y1\": 0.25,\"x0\":stayhome_order_date, \n \"x1\":stayhome_order_date,\"xref\":\"x1\",\"yref\":\"y1\",\n \"line\": {\"color\": \"blue\",\"width\": 1.5, \"dash\": \"dot\"}})\n fig.add_shape(shape_stayhome_order, row=row, col=col)\n\n # Plot\n subplot_before = go.Scatter(x=df_before[\"date\"], y=df_before[\"value\"], \n mode=\"lines\", name=\"Before Lockdown\",\n line=dict(width=1, color=config.LINE_COLOR_BEFORE), \n line_shape=\"linear\", showlegend=False) # linear or spline \n subplot_after = go.Scatter(x=df_after[\"date\"], y=df_after[\"value\"], \n mode=\"lines\", name=\"Actual Queries\",\n line=dict(width=1.5, color=config.LINE_COLOR_AFTER), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_prediction = go.Scatter(x=df_prediction[\"date\"], y=df_prediction[\"prediction\"], \n mode=\"lines\", name=\"Expected Queries\",\n line=dict(width=2, color=config.LINE_COLOR_BEFORE, dash=\"dot\"), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_lockdown_legend = go.Bar(x=[reopen_date,], y=[0,], \n name=\"Early Lockdown Phase\", \n showlegend=showlegend,\n marker_color=\"#eeeeee\")\n fig.add_trace(subplot_before, row=row, col=col)\n fig.add_trace(subplot_after, row=row, col=col)\n fig.add_trace(subplot_prediction, row=row, col=col)\n if idx == 0:\n fig.add_trace(subplot_lockdown_legend, row=row, col=col)\n\n # break\n\n # Caption\n # caption = go.layout.Annotation(\n # showarrow=False,\n # text=\"\",\n # xanchor=\"center\",\n # x=0.5,\n # yanchor=\"top\",\n # y=0.0,\n # yshift=0,\n # )\n\n # Layout\n # location = f\"{country}.{state}\" if state else country\n # fig_title = f\"\"\"Term: {group}. 
Location: {location}<br>\n # <span style=\"font-size: 14px;line-height:1\">Period: {data_start_date} - {data_end_date}\n # <br>Lockdown Period: {covid_start_date} - {PREDICT_FROM_DATE}</span>\"\"\"\n fig_title = \"\"\n fig.update_layout(title={\"text\": fig_title, \"x\":0.5, \"xanchor\": \"center\"}, \n title_font=dict(size=12),\n height=50 + n_rows * 175, width=250 * n_cols, coloraxis=dict(colorscale=\"Bluered_r\"), \n showlegend=True, plot_bgcolor=\"rgb(255,255,255)\", titlefont={\"size\": 30},\n margin={\"t\": 50},\n annotations=annotations,\n legend=dict(\n orientation=\"v\",\n yanchor=\"bottom\",\n y=0,\n xanchor=\"right\",\n x=1,\n bgcolor=\"white\",\n bordercolor=\"#333\",\n borderwidth=1\n )\n )\n fig.update_xaxes(showgrid=False, showticklabels=False, showline=False)\n fig.update_yaxes(showgrid=False, showticklabels=False, showline=True, range=value_range)\n\n # Store model parameters\n mkdir_if_not_exist(config.TRENDS_OUTPUT_DIR)\n df_params = pd.DataFrame(model_params, columns=[\"Query\", \"Order\"])\n df_params.to_csv(\"%s/ARIMA_orders_%s.csv\" % (config.TRENDS_OUTPUT_DIR, group), index=False)\n\n # Create online URL\n url = py.iplot(fig, filename=group, file_id=group)\n print(\"URL:\", url.src)\n\n if config.TRENDS_EXPORT_FIGURES:\n # Save\n mkdir_if_not_exist(config.TRENDS_FIGURES_DIR)\n fig.write_image(\"%s/%s_%s_%s.jpg\" % (config.TRENDS_FIGURES_DIR, country, state or \"All\", group))\n # fig.show()\n else:\n # Show\n fig.show()",
"def plot_bar_graph(target_offenses, means_offenses, stdev_offenses, filename):\n\tfig, ax = plt.subplots()\n\tbar_width = 0.8\n\tindex = np.arange(len(target_offenses))\n\tbar_graph = plt.bar(index, means_offenses, width=bar_width, yerr=stdev_offenses)\n\n\tplt.xlabel('Offenses')\n\tplt.ylabel('Average Occurences Per Year')\n\tplt.title('Average Occurrences Per Year 2010-2014')\n\tax.set_xticklabels(target_offenses)\n\tax.xaxis.set(ticks=np.arange(bar_width/2, len(target_offenses)), ticklabels=target_offenses)\n\tplt.tight_layout()\n\tplt.savefig(filename,format=\"png\")\n\tplt.show()",
"def plot_zinc_bar(self, zinc_df):\n\n print(\"Now plotting bar chart ...\")\n\n # Arrange the zinc content by food group\n zinc_grp = zinc_df.groupby(['group'])['value'].median()\n\n # Plot bar chart\n xlabels = zinc_grp.index.get_level_values(0)\n fig = plt.figure(figsize=(16, 10))\n ax = fig.add_subplot(111)\n zinc_grp.plot(kind='bar', rot=80, fontsize=14)\n ax.set_xticklabels(xlabels, rotation=40, ha='right')\n ax.set_title(\"Median Zinc content by Food Groups\", fontsize=15)\n ax.set_xlabel(\"USDA Food Groups\", fontsize=14)\n ax.set_ylabel(\"Zinc Content in mg\", fontsize=14)\n ax.yaxis.grid(color='maroon', linestyle='--', linewidth=1)\n plt.tight_layout()\n plt.savefig('zinc_content.png')\n print \"Ends at:\", datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n plt.show()"
] |
[
"0.6272986",
"0.58815676",
"0.58753604",
"0.58711225",
"0.5843219",
"0.5785193",
"0.5747456",
"0.57237816",
"0.5717383",
"0.5712674",
"0.5618445",
"0.5618378",
"0.55925804",
"0.5560634",
"0.55557597",
"0.55506545",
"0.5549811",
"0.55497795",
"0.55254406",
"0.54724264",
"0.54606223",
"0.54131746",
"0.54053044",
"0.5405077",
"0.53952134",
"0.5390288",
"0.53839",
"0.534668",
"0.53455406",
"0.53329015"
] |
0.6649256
|
0
|
Creates a PDF report with average-response plots for each variable specified in var_list.
|
from matplotlib.backends.backend_pdf import PdfPages

def avg_response_report(df, var_list, y_obs, y_est, file):
    # One page per variable: render each plot without showing it, then append it to the PDF.
    page = PdfPages(file)
    for var in var_list:
        avg_response(df, var, y_obs, y_est, show=False)
        page.savefig()      # saves the current figure created by avg_response
        plt.close("all")    # free the figure so pages do not pile up in memory
    page.close()
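
A short call sketch, assuming a DataFrame like the one in the avg_response example above; the variable list and output file name are placeholders.

avg_response_report(df, ["age"], "claims_observed", "claims_predicted",
                    file="avg_response_report.pdf")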
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_ave(results_list):\n x_range = range(len(results_list[0]))\n err_x, err_y, std_list = [], [], []\n\n for i in x_range:\n if i % 10 == 0:\n #get average for each generation\n column = [] \n for result in results_list:\n column.append(result[i])\n average = np.average(column)\n \n std_dev = np.std(column)\n err_x.append(i)\n err_y.append(average)\n std_list.append(std_dev)\n\n pylab.errorbar(err_x, err_y, yerr=std_list)\n pylab.show()",
"def create_plots(file_list):\n # load data and transpose so that country names are\n # the columns and their gdp data becomes the rows\n\n # read data into a pandas dataframe and transpose\n for filename in file_list:\n data = pandas.read_csv(filename, index_col = 'country').T\n \n # create a plot the transposed data\n ax = data.plot(title=filename)\n\n # axes labels\n ax.set_xlabel('Year')\n ax.set_ylabel('GDP Per Capita')\n\n # set axes ticks\n ax.set_xticks( range(len(data.index)))\n ax.set_xticklabels(data.index, rotation=45)\n\n # display the plot\n plt.show()",
"def estimate_object_PDFs(fluxratiodictionarylist,generatePDFplots=False,basename='NEOGALobject',AGNcol='blue',SFcol='red',verbose=True):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Loading NEOGAL models ')\n SF_models = nm.load_model('combined',filepath='/Users/kschmidt/work/catalogs/NEOGALlines/nebular_emission/')\n\n AGN_models = nm.load_model('combined',filepath='/Users/kschmidt/work/catalogs/NEOGALlines/AGN_NLR_nebular_feltre16/')\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Define all possible line ratios from the lines:\\n '\n 'NV1240, CIV1550, CIII1908, HeII1640, OIII1663, and SiIII1888')\n fluxratiodic = {} # [[SF range], [AGN range]]\n fluxratiodic['NV1240/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['CIV1550/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['CIII1908/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['HeII1640/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['OIII1663/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['SiIII1888/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/OIII1663'] = [[0,1e10],[0,1e10]]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Set up mode line flux vectors')\n fluxdic = {} # [[SF flxu], [AGN flux]]\n fluxdic['NV1240'] = [SF_models['NV1240'], AGN_models['NV1240']]\n fluxdic['CIV1550'] = [SF_models['CIV1548']+SF_models['CIV1551'], AGN_models['CIV1548']+AGN_models['CIV1551']]\n fluxdic['CIII1908'] = [SF_models['CIII1908'], AGN_models['CIII1907']+AGN_models['CIII1910']]\n fluxdic['HeII1640'] = [SF_models['HeII1640'], AGN_models['HeII1640']]\n fluxdic['OIII1663'] = [SF_models['OIII1661']+SF_models['OIII1666'], AGN_models['OIII1661']+AGN_models['OIII1666']]\n fluxdic['SiIII1888'] = [SF_models['SiIII1888'], AGN_models['SiIII1888']]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Get ranges of model flux ratios')\n # for FR in fluxratiodic.keys():\n # numerator = 
FR.split('/')[0]\n # denominator = FR.split('/')[1]\n # fluxratiodic[FR][0] = [np.min(fluxdic[numerator][0]/fluxdic[denominator][0]),\n # np.max(fluxdic[numerator][0]/fluxdic[denominator][0])]\n # fluxratiodic[FR][1] = [np.min(fluxdic[numerator][1]/fluxdic[denominator][1]),\n # np.max(fluxdic[numerator][1]/fluxdic[denominator][1])]\n\n\n Nobj = len(fluxratiodictionarylist)\n if verbose: print(' - Get model selection given flux ratio ranges according to '+\n str(Nobj)+\" object's data provided \")\n if verbose: print(' Selection based on the total number of NEOGAL models: SF='+str(len(SF_models))+' and AGN='+str(len(AGN_models )))\n\n parametercollection_SF = [{'id':0, 'Zgas':[],'logUs':[],'xid':[],'nh':[],'COCOsol':[],'mup':[]}]*Nobj\n parametercollection_AGN = [{'id':0, 'Zgas':[],'logUs':[],'xid':[],'nh':[],'alpha':[]}]*Nobj\n\n for oo, FRdic_input in enumerate(fluxratiodictionarylist):\n objid = FRdic_input['id']\n\n # ------ resetting flux ratio dictionary for object ------\n fluxratiodic_obj = {}\n for key in fluxratiodic.keys():\n fluxratiodic_obj[key] = fluxratiodic[key]\n # --------------------------------------------------------\n\n for FR in FRdic_input.keys():\n if FR in fluxratiodic.keys():\n fluxratiodic_obj[FR] = [FRdic_input[FR],FRdic_input[FR]]\n # print(str(objid)+':'+FR+' -->'+str(fluxratiodic_obj[FR]))\n elif FR == 'id':\n pass\n else:\n print(' WARNING nm.estimate_object_PDFs(): The flux ratio entry '+FR+' is not availble in the \\n'\n ' dictionary from the NEOGAL models. Define that flux \\n'\n ' ratio or correct input data.')\n\n goodent_SF = np.arange(len(SF_models))\n goodent_AGN = np.arange(len(AGN_models))\n for FR in fluxratiodic.keys():\n numerator = FR.split('/')[0]\n denominator = FR.split('/')[1]\n\n goodent_FR_SF = np.where( (fluxdic[numerator][0]/fluxdic[denominator][0] >= fluxratiodic_obj[FR][0][0]) &\n (fluxdic[numerator][0]/fluxdic[denominator][0] <= fluxratiodic_obj[FR][0][1]))[0]\n goodent_SF = np.intersect1d(goodent_SF,goodent_FR_SF)\n\n goodent_FR_AGN = np.where( (fluxdic[numerator][1]/fluxdic[denominator][1] >= fluxratiodic_obj[FR][1][0]) &\n (fluxdic[numerator][1]/fluxdic[denominator][1] <= fluxratiodic_obj[FR][1][1]))[0]\n goodent_AGN = np.intersect1d(goodent_AGN,goodent_FR_AGN)\n\n\n parametercollection_SF[oo] = {'id' : FRdic_input['id'],\n 'Zgas' : SF_models['Zgas'][goodent_SF],\n 'logUs' : SF_models['logUs'][goodent_SF],\n 'xid' : SF_models['xid'][goodent_SF],\n 'nh' : SF_models['nh'][goodent_SF],\n 'COCOsol': SF_models['COCOsol'][goodent_SF],\n 'mup' : SF_models['mup'][goodent_SF]}\n\n parametercollection_AGN[oo] = {'id' : FRdic_input['id'],\n 'Zgas' : AGN_models['Zgas'][goodent_AGN],\n 'logUs' : AGN_models['logUs'][goodent_AGN],\n 'xid' : AGN_models['xid'][goodent_AGN],\n 'nh' : AGN_models['nh'][goodent_AGN],\n 'alpha' : AGN_models['alpha'][goodent_AGN]}\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Getting distribution ranges (percentiles) for parameter collections ')\n stat_SF = []\n stat_AGN = []\n\n for oo in np.arange(Nobj):\n stat_SF.append({'id':parametercollection_SF[oo]['id'], 'Zgas':[],'logUs':[],'xid':[],'nh':[],'COCOsol':[],'mup':[]})\n for key in stat_SF[oo].keys():\n if key == 'id': continue\n\n if len(parametercollection_SF[oo][key]) > 0:\n\n meanval_SF = np.mean(parametercollection_SF[oo][key])\n std_SF = np.std(parametercollection_SF[oo][key])\n medianval_SF = np.median(parametercollection_SF[oo][key])\n perc2p5_SF = 
np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.025)]\n perc16_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.16)]\n perc25_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.25)]\n perc50_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.50)]\n perc75_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.75)]\n perc84_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.84)]\n perc97p5_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.975)]\n\n stat_SF[oo][key] = [meanval_SF,std_SF,medianval_SF,perc2p5_SF,perc16_SF,perc25_SF,\n perc50_SF,perc75_SF,perc84_SF,perc97p5_SF]\n else:\n stat_SF[oo][key] = [np.nan]*10\n\n stat_AGN.append({'id':parametercollection_AGN[oo]['id'], 'Zgas':[],'logUs':[],'xid':[],'nh':[],'alpha':[]})\n for key in stat_AGN[oo].keys():\n if key == 'id': continue\n\n if len(parametercollection_AGN[oo][key]) > 0:\n meanval_AGN = np.mean(parametercollection_AGN[oo][key])\n std_AGN = np.std(parametercollection_AGN[oo][key])\n medianval_AGN = np.median(parametercollection_AGN[oo][key])\n perc2p5_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.025)]\n perc16_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.16)]\n perc25_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.25)]\n perc50_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.50)]\n perc75_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.75)]\n perc84_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.84)]\n perc97p5_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.975)]\n\n stat_AGN[oo][key] = [meanval_AGN,std_AGN,medianval_AGN,perc2p5_AGN,perc16_AGN,perc25_AGN,\n perc50_AGN,perc75_AGN,perc84_AGN,perc97p5_AGN]\n else:\n stat_AGN[oo][key] = [np.nan]*10\n\n stat_idlist = [stat_AGN[oo]['id'] for oo in np.arange(len(stat_AGN))]\n parametercollection_idlist = [parametercollection_AGN[oo]['id'] for oo in np.arange(len(parametercollection_AGN))]\n\n if stat_idlist != parametercollection_idlist:\n sys.exit(' NEOGALmodels.estimate_object_PDFs(): Wait a minute... the ID lists are not identical between \\n'\n ' the parameter collection ('+str(parametercollection_idlist)+') and \\n'\n ' the stats ('+str(stat_idlist)+')')\n\n plotname = basename+'_Stats.pdf'\n nm.plot_stat(plotname,stat_SF,stat_AGN,SFcol=SFcol,AGNcol=AGNcol,verbose=verbose)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if generatePDFplots:\n if verbose: print(' - Plotting the extracted model parameter collections')\n plotname = basename+'_PDFs.pdf'\n nm.plot_modelparametercollections(plotname, parametercollection_SF, parametercollection_AGN,\n stat_SF, stat_AGN, AGNcol=AGNcol,SFcol=SFcol,\n fluxratiodictionarylist=fluxratiodictionarylist,\n verbose=verbose)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n return parametercollection_SF, parametercollection_AGN, stat_SF, stat_AGN",
"def plot_average_reponse(data,prj_info,pp=PdfPages(\"exploration.pdf\"),TMP=1234,bins=20):\r\n #Copy data\r\n data = data.copy()\r\n #Slice data\r\n data = data.sample(n = min(10000,data.shape[0]),random_state=1234)\r\n #Colnames\r\n var_to_plot = list(set(data.columns.values)-set(prj_info['PRJ_COLUMN'].values()))\r\n\r\n #Loop figure\r\n pbar = ProgressBar()\r\n for var in pbar(var_to_plot):\r\n #Bins\r\n if data[var].dtype.name != \"category\" and len(data[var].unique())>bins:\r\n data[\"var_new\"] = pd.qcut(data[var], bins, duplicates='drop')\r\n else:\r\n data[\"var_new\"] = data[var].astype(str)\r\n data_plot = data.groupby(\"var_new\").agg({prj_info['PRJ_COLUMN'][\"RESPONSE\"]: 'mean', \"var_new\": 'count'})\r\n\r\n #Table\r\n data_plot = data.groupby(\"var_new\").agg({prj_info['PRJ_COLUMN'][\"RESPONSE\"]: 'mean', \"var_new\": 'count'})\r\n\r\n #Build plot\r\n f, ax = plt.subplots()\r\n ax2 =ax.twinx()\r\n sns.barplot(x=data_plot.index.tolist(), y=\"var_new\",data=data_plot,ax=ax, color=\"dodgerblue\")\r\n sns.pointplot(x=data_plot.index.tolist(), y=prj_info['PRJ_COLUMN'][\"RESPONSE\"], data=data_plot,ax=ax2, color=\"chartreuse\")\r\n ax.set_xlabel(var)\r\n ax.set_ylabel(var)\r\n ax2.set_ylabel(prj_info['PRJ_COLUMN'][\"RESPONSE\"])\r\n plt.title(\"Average reponse by \" + var)\r\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=60)\r\n \r\n pp.savefig(f)\r\n\r\n return None",
"def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()",
"def create_html_page_of_plots(list_of_plots, prefix='html'):\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n os.system('mv *.png %s' % prefix)\n #print(list_of_plots)\n idx = 0\n htmlfile = open('%s/index_0.html' % prefix, 'w')\n htmlfile.write('<!DOCTYPE html><html><body><div>\\n')\n for plot in list_of_plots:\n if idx > 0 and idx % 200 == 0:\n htmlfile.write('</div></html></html>\\n')\n htmlfile.close()\n htmlfile = open('%s/index_%d.html' % (prefix, (idx//200)), 'w')\n htmlfile.write('<!DOCTYPE html><html><body><div>\\n')\n htmlfile.write('<p><img src=\"%s\"></p>\\n' % plot)\n idx += 1\n htmlfile.write('</div></html></html>\\n')\n htmlfile.close()",
"def plotMultipleVars(self, vars, series, groups=None, labels=None, postfix=\"\",logy=True, fixedrange=False):\n # split the variable names, we'll use the first one for naming purposes\n varnames = [var.split(\"_\") for var in vars]\n\n # create the separate dataframes from the provided groups\n # Define some labels if we have groups and no provided labels\n # Stack all the variables we want to plot in one histogram\n dfs = None\n if groups:\n dfs = [series.loc[g,:].stack() for g in groups]\n if not labels or len(labels) != len(groups):\n labels = [\"Group %s\" % (i+1) for i in xrange(len(groups)-1)]\n labels.append(\"Bulk\")\n else:\n dfs = [series.stack()]\n\n\n # Get right number of colors, and reverse them so that mediumpurple is \n # used for the bulk of the chips (assumed to be the last group)\n colors = (self.colorlist[:len(dfs)])\n colors.reverse()\n \n # Make the histogram\n # Get the preferred binning and check whether all values fall within that range \n if varnames[0][0] in cutinfo11:\n nbins = cutinfo11[varnames[0][0]][2]\n xmin = cutinfo11[varnames[0][0]][3]\n xmax = cutinfo11[varnames[0][0]][4]\n series_min = series.min().min()\n series_max = series.max().max()\n if fixedrange or (series_min > xmin and series_max < xmax):\n ax = plt.hist(dfs, bins=nbins, range=[xmin, xmax], stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=nbins, stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=20, stacked=True, \n color=colors, label=labels, log=logy)\n\n # Set the axis titles\n if varnames[0][0] in cutinfo11:\n if len(varnames[0]) == 1:\n plt.xlabel(cutinfo11[varnames[0][0]][0], \n fontsize=self.labelsize)\n else:\n plt.xlabel(\"%s ; %s\" % (cutinfo11[varnames[0][0]][0], varnames[0][1]), \n fontsize=self.labelsize)\n else:\n plt.xlabel(varnames[0][0], \n fontsize=self.labelsize)\n plt.ylabel(\"Number of measurements\", fontsize=self.labelsize)\n\n # set margins and format axis labels\n x0, x1, y0, y1 = plt.axis()\n if logy:\n plt.axis((x0, x1,\n 0.5, y1*10))\n else:\n plt.axis((x0, x1,\n 0.5, y1*(1+0.2)))\n ax = plt.gca()\n ax.tick_params(labelsize=self.ticklabelsize)\n plt.gcf().subplots_adjust(bottom=0.12)\n\n # Add mean and std info\n # Only use info on good chips, should be the last group in the list\n mean = dfs[-1].mean() #series.stack().mean()\n std = dfs[-1].std() #series.stack().std()\n plt.figtext(0.4, 0.92,\n \"Mean: %.3g Std/Mean: %.3g\\nStd: %.3g\"%(mean, std/mean, std),\n fontsize=self.ticklabelsize)\n\n # Add cut lines if we have info\n if self.cutfile != None and varnames[0][0] in cutinfo11:\n plt.axvline(x=self.cuts[varnames[0][0]][2], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][3], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][0], linestyle='solid', linewidth=2, color='dimgrey')\n plt.axvline(x=self.cuts[varnames[0][0]][1], linestyle='solid', linewidth=2, color='dimgrey')\n\n # Add legend if we have labels\n if labels:\n plt.legend(loc='best', ncol=2)\n\n # Save figure\n plt.savefig(\"%s/%s%s.pdf\" % (self.outputdir, varnames[0][0], postfix))\n plt.clf()",
"def gen_report(a_dict):\n\n # header = \"{:<20} |{:^10}|{:^10}|{:>10}\".format(\"Donor Name\",\"Total Given\",\"Num Gifts\",\"Average Gift\")\n\n header = \"{:<20} |{:^13}|{:^13}|{:>13}\".format(\"Donor Name\",\"Total Given\",\"Num Gifts\",\"Average Gift\")\n\n print(header)\n for k,v in a_dict.items():\n total, num, avg = gen_stats(v)\n row = \"{:<20} ${:^13} {:^13}${:>13}\".format(k,total,num,avg)\n print(row)",
"def three_PDF_plots(res=200,table_exts=[''],**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n GR = glo.global_results()\n\n fig, axs = plt.subplots(3, sharex='col',\\\n figsize=(8,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n # First print cell data distribution\n i = 0\n for gal_index in zip(p.gal_index):\n ax1 = axs[i]\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n df = gal_ob.cell_data.get_dataframe()\n lognH = np.log10(df.nH)\n hist = np.histogram(lognH[df.nH.values > 0],bins=200,weights=df.m[df.nH.values > 0])\n hist1 = np.asarray(hist[0]) # histogram\n hist2 = np.asarray(hist[1]) # bin edges\n hist1 = hist1*1./sum(hist1)\n ax1.plot(hist2[0:len(hist1)],hist1,drawstyle='steps',ls='-',lw=1.5,\\\n alpha=0.7,color=p.color[0],label='Original cell distribution')\n \n for table_ext,ls,color in zip(table_exts,['--',':'],p.color[1::]):\n if '_M10' in table_ext: lab = 'Mach = 10'\n if '_arepoPDF_M51' in table_ext: lab = 'AREPO parametrized PDF'\n PDF(gal_index,color=color,table_ext=table_ext,ls=ls,res=200,add=True,ax=ax1,label=lab,ow=p.ow)\n \n if i == 0: ax1.legend(loc='upper right',fontsize=12)\n if i == 2: ax1.set_xlabel(getlabel('lnH'))\n ax1.set_ylabel('Mass fraction per bin')\n\n i += 1\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'cell_data/PDFs/'): os.mkdir(p.d_plot + 'cell_data/PDFs/') \n plt.savefig(p.d_plot + 'cell_data/PDFs/simple_PDF_%s%s%s_x3.png' % (p.sim_name,p.sim_run,p.table_ext), format='png', dpi=250, facecolor='w')",
"def _create_ts_plots(ts_agent_list, output_directory):\n\n # create traces for plots\n makespans_traces, makespans_layout, \\\n nh_sizes_traces, nh_sizes_layout, \\\n tl_sizes_traces, tl_sizes_layout = _make_ts_traces(ts_agent_list)\n\n # create plots\n plot(dict(data=makespans_traces, layout=makespans_layout),\n filename=str(output_directory / 'ts_makespans.html'),\n auto_open=False)\n plot(dict(data=nh_sizes_traces, layout=nh_sizes_layout),\n filename=str(output_directory / 'neighborhood_sizes.html'),\n auto_open=False)\n plot(dict(data=tl_sizes_traces, layout=tl_sizes_layout),\n filename=str(output_directory / 'tabu_list_sizes.html'),\n auto_open=False)\n\n # create schedule\n best_solution = min([ts_agent.best_solution for ts_agent in ts_agent_list])\n best_solution.create_schedule_xlsx_file(str(output_directory / 'ts_schedule'), continuous=True)\n best_solution.create_gantt_chart_html_file(str(output_directory / 'ts_gantt_chart.html'), continuous=True)",
"def create_a_report(donor_list):\n num_of_donors = len(donor_list) / 3\n num_of_donors = int(num_of_donors)\n print(\"Donor Name\" + (' ' * 15) + \" | \" + \"Total Given\" \" | \" \"Num Gifts\" + \" | \" + \"Average Gift\\n\" + ('- ' * 30))\n donor_line = \"{:25} ${:>11.2f} {:^13} ${:>11.2f}\\n\"\n multiple_donor_lines = donor_line * num_of_donors\n\n def add_average_donation(donor_list):\n \"\"\"This function within the create_a_report function returns a new list after taking the existing donor_list,\n takes the first 3 entries of each donor - name, total donations, number of donations - and adds the\n average amount each donor contributes to the end of each donor's profile. This makes the donor_list 1/3 bigger\n than the original list.\"\"\"\n new_length = (len(donor_list) / 3) * 4\n longer_list = []\n beg = 0\n end = 3\n while len(longer_list) < new_length:\n longer_list.extend(donor_list[beg:end])\n longer_list.append(longer_list[-2] / longer_list[-1])\n beg += 3\n end += 3\n return longer_list\n longer_list = add_average_donation(donor_list)\n print(multiple_donor_lines.format(*longer_list))",
"def _create_ga_plots(ga_agent, output_directory):\n\n # create trace for plot\n makespans_traces, makespans_layout = _make_ga_traces(ga_agent)\n\n # create plot\n plot(dict(data=makespans_traces, layout=makespans_layout),\n filename=str(output_directory / 'ga_makespans.html'),\n auto_open=False)\n\n # create schedule\n ga_agent.best_solution.create_schedule_xlsx_file(str(output_directory / 'ga_schedule'), continuous=True)\n ga_agent.best_solution.create_gantt_chart_html_file(str(output_directory / 'ga_gantt_chart.html'), continuous=True)",
"def plot_multi_bars_means_stds_sats(image_list, ax): \n N = len(image_list)\n means = [compute_means(image) for image in image_list]\n stds = [compute_stds(image) for image in image_list]\n saturations = [(1-compute_saturations(image)) for image in image_list]\n \n mean_of_every_feature = []\n for idx in range(len(means)):\n mean_of_every_feature.append((means[idx] + stds[idx] + saturations[idx])/3)\n \n \n ## necessary variables\n ind = np.arange(N) # the x locations for the groups\n width = 0.3 # the width of the bars\n #the bars\n rects1 = ax.bar(ind, means, width, color='red')\n rects2 = ax.bar(ind+width, stds, width, color='green')\n rects3 = ax.bar(ind+2*width, saturations, width, color='blue')\n # axes and labels\n ax.set_xlim(-0.5*width,len(ind)+0.5*width)\n ax.set_ylim(0,1)# this is customized for optimal visualization\n# ax.set_xlabel(r'$Methods \\ in$')\n \n #ax.set_title('Scores by group and gender')\n xTickMarks = [r'$[9]$', \n r'$[23]$', \n r'$[17]$', \n r'$[18]$', \n r'$[24]$', \n r'RC']\n ax.set_xticks(ind+width)\n xtickNames = ax.set_xticklabels(xTickMarks)\n plt.setp(xtickNames, rotation=0)\n ## add a legend\n ax.legend( (rects1[0], rects2[0], rects3[0]), (r'$\\mu_{\\mathrm{diff}}$', r'$\\sigma_{\\mathrm{diff}}$', r'$\\lambda$'), \n loc=1, ncol=3, handlelength=0.8, borderpad=0.2, labelspacing=0.0)\n\n return mean_of_every_feature",
"def plotvar(x, y, var, weights, NBINS = 70, title = '', targetdir = '.'):\n bins = np.linspace(np.percentile(x, 0.5), np.percentile(x, 99), NBINS)\n plot_reweight_result(x, y, bins, weights, title = title, xlabel = var)\n plt.savefig(f'{targetdir}/{var}.pdf', bbox_inches='tight')\n plt.close()",
"def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)",
"def plot_distributions(x, variable_name):\n n_cols = x.shape[1]\n\n plot_rows = n_cols // 2\n plot_rows += n_cols % 2\n plot_cols = 2\n\n position = range(1, n_cols + 1)\n fig = plt.figure()\n\n for col_index in range(n_cols):\n col_values = x[:, col_index]\n ax = fig.add_subplot(plot_rows, plot_cols, position[col_index])\n ax.hist(col_values)\n ax.set_title(\"Distribution of variable {}{}\".format(variable_name, col_index + 1))\n ax.set_ylabel(\"Frequency\")\n ax.set_xlabel(\"Value\")\n\n plt.tight_layout()\n plt.savefig(\"plots/{}Dist.png\".format(variable_name))\n plt.show()",
"def output_files(self,positions, num_trials):\r\n output_text = open('results.txt', 'w')\r\n result = self.simulation(positions, num_trials)\r\n for pos in positions:\r\n position_value = 1000 / pos\r\n mean = np.mean(result[pos])\r\n std = np.std(result[pos])\r\n plt.hist(result[pos],100,range=[-1,1])\r\n plt.savefig(\"histogram_\"+str(pos).zfill(4)+\"_pos.pdf\")\r\n plt.close()\r\n output_text.write('For position : {0} with position Value: {1} '.format(pos,position_value))\r\n output_text.write(' The mean is: {0} The standard deviation: {1} \\n'.format(mean,std))\r\n output_text.close()",
"def _plot_marginal_pdfs( res, nbins=101, **kwargs):\n\tfrom matplotlib import pyplot as pl\n\timport numpy as np\n\n\tnparam = len(res.vparam_names)\n\t# nrow = np.sqrt( nparam )\n\t# ncol = nparam / nrow + 1\n\tnrow, ncol = 1, nparam\n\n\tpdfdict = _get_marginal_pdfs( res, nbins )\n\n\tfig = plt.gcf()\n\tfor parname in res.vparam_names :\n\t\tiax = res.vparam_names.index( parname )+1\n\t\tax = fig.add_subplot( nrow, ncol, iax )\n\n\t\tparval, pdf, mean, std = pdfdict[parname]\n\t\tax.plot( parval, pdf, **kwargs )\n\t\tif np.abs(std)>=0.1:\n\t\t\tax.text( 0.95, 0.95, '%s %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.01:\n\t\t\tax.text( 0.95, 0.95, '%s %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.001:\n\t\t\tax.text( 0.95, 0.95, '%s %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telse :\n\t\t\tax.text( 0.95, 0.95, '%s %.3e +- %.3e'%( parname, mean, std),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\n\tplt.draw()",
"def print_report(donors_list):\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_list):\n name = donor[0]\n total = sum(donor[1])\n num_gift = len(donor[1])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)",
"def pretty_print(list_of_averages):\n try:\n for date, average in list_of_averages:\n print('{:11s}{:0.2f}'.format(date, average))\n\n except Exception as e:\n print(e)\n exit()",
"def plot_pnl_pdf(title, vol, var, lgnd, report):\n fig, ax = plt.subplots(1, figsize=(16, 8))\n mu, sigma = 0, vol\n x = np.linspace(mu - 5 * sigma, 5 * sigma, 100)\n plt.plot(x, norm.pdf(x, mu, sigma));\n\n # add a vertical line for the VaR\n _ = plt.axvline(x=var, color='r', linestyle='--')\n\n # add title and labels\n _ = plt.legend(labels=[lgnd])\n _ = plt.xlabel('P&L', size=16)\n _ = plt.ylabel('density', size=16)\n\n # display the histogram\n report.write_plot(title)\n _ = plt.title(title, size=32)\n plt.show();\n plt.close();",
"def plot_fits_and_residuals(all_fits_df, dfs_list, expt_name, **kwargs):\n\n colors = cm.rainbow(np.linspace(0, 1, len(dfs_list)))\n fig = plt.figure(figsize=(5, 5), tight_layout=True)\n fig.set_dpi(300)\n\n filename = f'{expt_name}_fits_and_residuals'\n fileformat = '.png'\n \n # Set parameters for decay traces plot\n xlabel_traces = kwargs.get('xlabel_traces', 'Time after Chase (Hrs.)')\n ylabel_traces = kwargs.get('ylabel_traces', 'YFP(t)/YFP(0)')\n ylim_traces = kwargs.get('ylim_traces', (0, 1.2))\n xticks_traces = kwargs.get('xticks_traces', make_ticks(all_fits_df.x_input, decimals=0))\n yticks_traces = kwargs.get('y_ticks_traces', make_ticks((0, 1), decimals=1, n_ticks=7))\n xlim_traces = kwargs.get('xlim_traces', (xticks_traces.min(), xticks_traces.max())) \n # Set parameters for decay fit residuals plot\n xlabel_resids = kwargs.get('xlabel_resids', xlabel_traces)\n ylabel_resids = kwargs.get('ylabel_resids', 'Residuals')\n xlim_resids = kwargs.get('xlim_resids', xlim_traces)\n xticks_resids = xticks_traces\n yticks_resids = kwargs.get('yticks_resids', make_yticks_0cent(all_fits_df.residual))\n ylim_resids = kwargs.get('ylim_resids', (yticks_resids.min(), yticks_resids.max()))\n \n # Set parameters for decay fit residuals kernel density estimate\n # plot \n xlabel_kde = kwargs.get('xlabel_kde', ylabel_resids)\n ylabel_kde = kwargs.get('ylabel_kde', 'Density')\n xlim_kde = kwargs.get('xlim_kde', ylim_resids)\n ylim_kde = kwargs.get('ylim_kde', None)\n xticks_kde = yticks_resids\n # yticks_kde will get set below during \n # density calcuation\n \n # Set parameters used across all plots\n hidden_spines = kwargs.get('hidden_spine', ['top', 'right'])\n labelfontsize = kwargs.get('labelfontsize', 12)\n linewidth = kwargs.get('linewidth', 1)\n linealpha = kwargs.get('linealpha', 1)\n scatteralpha = kwargs.get('scatteralpha', 0.8)\n scattersize = kwargs.get('scattersize', 5)\n \n # Make the residuals scatter plot\n ax = fig.add_subplot(222)\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n ax.scatter(cell_df.x_input, cell_df.residual,\n s=scattersize, alpha=scatteralpha,\n facecolor='white', edgecolor=colors[cell_index])\n\n ax.axhline(0, linewidth=linewidth, alpha=linealpha, color='black')\n for spine in [ax.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n try:\n ax.set_xticks(xticks_resids)\n except:\n pass\n try:\n ax.set_yticks(yticks_resids)\n except:\n pass\n try:\n ax.set_ylim(ylim_resids)\n except:\n pass\n if xlabel_resids:\n ax.set_xlabel(xlabel_resids, fontsize=labelfontsize)\n if ylabel_resids:\n ax.set_ylabel(ylabel_resids, fontsize=labelfontsize) \n\n ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box')\n\n # Scatter plot of traces and line plot of fitted decays\n ax2 = fig.add_subplot(221)\n\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n ax2.plot(cell_df.x_input, cell_df.y_pred_norm/cell_df.y_pred_norm.max(),\n linewidth=linewidth, alpha=linealpha, color=colors[cell_index])\n ax2.scatter(cell_df.x_input, cell_df.y_input_norm/cell_df.y_input_norm.max(),\n s=scattersize, alpha=scatteralpha, color=colors[cell_index])\n\n if ylim_traces:\n ax2.set_ylim(ylim_traces)\n if xlim_traces:\n ax2.set_xlim(xlim_traces) \n try:\n ax2.set_xticks(xticks_traces)\n except:\n pass\n try:\n ax2.set_yticks(yticks_traces)\n except:\n pass \n if xlabel_traces:\n ax2.set_xlabel(xlabel_traces, 
fontsize=labelfontsize)\n if ylabel_traces:\n ax2.set_ylabel(ylabel_traces, fontsize=labelfontsize) \n\n ax2.set_aspect(1.0/ax2.get_data_ratio(), adjustable='box')\n for spine in [ax2.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n\n # Smoothed hist of residuals for each cell (KDE plot)\n ax3 = fig.add_subplot(223)\n \n densities = []\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n density = gaussian_kde(cell_df.residual)\n xs = np.linspace(all_fits_df.residual.min(),all_fits_df.residual.max(),200)\n ax3.plot(xs,density(xs), color=colors[cell_index],\n alpha=linealpha, linewidth=linewidth)\n densities.append(density(xs))\n\n # Also plot total residuals\n density = gaussian_kde(all_fits_df.residual)\n xs = np.linspace(all_fits_df.residual.min(),all_fits_df.residual.max(),200)\n ax3.plot(xs,density(xs), color='black',\n alpha=linealpha*2, linewidth=linewidth)\n densities.append(density(xs))\n \n # Figure out whech density outuput array has the highest y value and\n # set the yticks of the plot using that density array\n max_dens = np.array([np.max(arr) for arr in densities])\n longest_range_den = densities[max_dens.argmax()]\n yticks_kde = make_ticks(longest_range_den)\n\n if ylim_kde:\n ax3.set_ylim(ylim)\n if xlim_kde:\n ax3.set_xlim(xlim_kde)\n if xlabel_kde:\n ax3.set_xlabel(xlabel_kde, fontsize=labelfontsize)\n if ylabel_kde:\n ax3.set_ylabel(ylabel_kde, fontsize=labelfontsize)\n try:\n ax3.set_yticks(yticks_kde)\n except:\n pass\n try:\n ax3.set_xticks(xticks_kde)\n except:\n pass\n\n ax3.set_aspect(1.0/ax3.get_data_ratio(), adjustable='box')\n for spine in [ax3.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n\n if filename and fileformat:\n fig.savefig(f'{filename}{fileformat}', transparent=True)\n print(f'Saved plot at {filename}{fileformat}')",
"def make_N4255_plots(data_obj, aspect_corr=1.0, title_pages=False):\n\n print(\"Generating plots...\")\n\n #Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n #Call the function to create the title page of the pdf document\n plot_front_title(data_obj)\n\n #-----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. #\n #-----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n #-----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n #-----------------------------------------------------------------------------------#\n\n #Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 1, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the first test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the third test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 3, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] #Make sure the local yi is updated\n\n #Plot the overall results text of the fourth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 4, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the fifth test, NEQ Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 5, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) #Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n #Plot the overall results text of the sixth test, Flatness of field\n yi = yi - dy[0]\n plot_ff_text(data_obj, 6, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the seventh test, Image Extent\n yi = yi - dy[0]\n plot_extent_text(data_obj, 7, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the eighth test, Image Area\n yi = yi - dy[0]\n plot_area_text(data_obj, 8, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the ninth test, Aspect Ratio\n yi = yi - dy[0]\n plot_a_ratio_text(data_obj, 9, yi, xpos, ha, va, fs, dfs)\n\n #--------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n #--------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs)\n\n\n #-----------------#\n # Plot the images #\n #-----------------#\n plot_images(data_obj, fs) #Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) #Add in the footnotes to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Penetration\")\n\n #Call the function to plot the Steel Penetration results to the pdf\n plot_steel_pen_N4255(data_obj, 1)\n\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 2, cmap)\n\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 3)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 4)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 5)\n\n #-------------------#\n # Flatness of field #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Flatness of Field\")\n\n # Call the function to plot the Flatness of Field results to the pdf\n plot_field_flatness(data_obj, 6)\n\n #--------------#\n # Image extent #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 7: Image Extent\")\n\n # Call the function to plot the Image Extent results to the pdf\n plot_image_extent(data_obj, 7)\n\n\n #------------#\n # Image Area #\n #------------#\n if title_pages:\n fig = new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.text(0.5, 0.5, 'Test 8: Image Area', ha='center', va='center', fontsize=20)\n str1 = str(data_obj.image_area[0]) + ' by ' + str(data_obj.image_area[1]) + ' pixels'\n plt.text(0.5, 0.4, str1, ha='center', va='center', fontsize=12)\n\n #--------------#\n # Aspect Ratio #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 9: Aspect Ratio\")\n\n #Call the function to plot the Aspect Ratio results to the pdf\n plot_aspect_ratio(data_obj, 9, cmap, aspect_corr)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)",
"def _plot_1d(var, outfile):\n fig = plt.figure(figsize = (5, 5))\n ax = fig.add_subplot(111)\n ax.hist(np.array(var))\n ax.set_xlabel(var.name)\n fig.savefig(outfile)\n plt.close()",
"def plot_subplots(x_list, y_list, z_list):\n # create a line chart with the average rating of the top movies per year\n # min rating = 0 and max = 10\n plot1 = plt.subplot(211)\n plt.plot(x_list, y_list, color = 'lightseagreen')\n plt.axis([START_YEAR, END_YEAR - 1, 0, 10])\n plt.title('Average IMDB Movie Rating per Year', fontsize=12)\n plt.ylabel('Average Rating')\n plt.grid(True)\n\n # make x ticklabels of plot1 invisible\n plt.setp(plot1.get_xticklabels(), visible=False)\n\n # adjust space between subplots\n plt.subplots_adjust(hspace=0.3)\n\n # create a line chart with the average runtime with shared x-axis\n plot2 = plt.subplot(212, sharex=plot1)\n plt.plot(x_list, z_list, color = 'lightseagreen')\n plt.title('Average IMDB Movie Runtime per Year', fontsize=12)\n plt.ylabel('Average Runtime (min)')\n plt.grid(True)\n\n # define axes, with all years (2008 till 2017) on the x-axis\n # min runtime = 0, max runtime = 180\n plt.axis([START_YEAR, END_YEAR - 1, 0, 180])\n plt.xticks(x_list)\n plt.xlabel('Year')\n\n # plot both the subplots\n plt.show()",
"def create_report():\n donors = donor_names()\n \n total_given = list()\n num_gifts = list()\n average_gift = list()\n for row in donor_db:\n total_given.append(sum(row[1:]))\n num_gifts.append(len(row[1:]))\n average_gift.append(sum(row[1:]) / len(row[1:]))\n \n print(\"Donor Name | Total Given | Num Gifts | Average Gift\\n\")\n print('------------------------------------------------------------------')\n for row in range(len(donors)):\n print(\"{:25} ${:13.2f}{:11d} ${:13.2f}\".format(donors[row], total_given[row], \n num_gifts[row], average_gift[row]))",
"def create_report():\n donors = donor_names()\n \n total_given = list()\n num_gifts = list()\n average_gift = list()\n for row in donor_db:\n total_given.append(sum(row[1:]))\n num_gifts.append(len(row[1:]))\n average_gift.append(sum(row[1:]) / len(row[1:]))\n \n print(\"Donor Name | Total Given | Num Gifts | Average Gift\\n\")\n print('------------------------------------------------------------------')\n for row in range(len(donors)):\n print(\"{:25} ${:13.2f}{:11d} ${:13.2f}\".format(donors[row], total_given[row], \n num_gifts[row], average_gift[row]))",
"def plot_data(indf, prefix='html'):\n list_of_plots = []\n# scatter_matrix(indf)\n# pl.savefig('scatter_matrix.png')\n# list_of_plots.append('scatter_matrix.png')\n\n for col in indf:\n pl.clf()\n# cond = indf[col].notnull()\n# v = indf[cond][col]\n v = indf[col]\n# nent = len(v)\n# hmin, hmax = v.min(), v.max()\n# xbins = np.linspace(hmin,hmax,nent)\n# hmin, hmax, nbin = BOUNDS[col]\n# xbins = np.linspace(hmin, hmax, nbin)\n v.hist(bins=20, histtype='step', normed=True, log=True)\n pl.title(col)\n pl.savefig('%s_hist.png' % col)\n list_of_plots.append('%s_hist.png' % col)\n\n create_html_page_of_plots(list_of_plots, prefix)\n return",
"def make_pdf_reports(df, path):\n with PdfPages(path) as pdf:\n # settings for the file\n base = 10 # threshold for grouping points\n page_size = (11, 8.5)\n point_size = 1.5 # scatter plot point size\n\n df[\"color\"] = df.db.apply(rand_color) # adjacency color\n df[\"fuzzy_y\"] = df.y.apply(my_round) # horizontal group color\n df[\"y_color\"] = df.fuzzy_y.apply(rand_color)\n df[\"fuzzy_x\"] = df.x.apply(my_round) # vertical group color\n df[\"x_color\"] = df.fuzzy_x.apply(rand_color)\n\n # Add title and axis names\n plt.figure(figsize=page_size)\n plt.title('Horizontal Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.y_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Vertical Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.x_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Block Adjacency Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n data1 = df[[\"floor\", \"swing_drop\", \"name\"]]\n data = data1.groupby([\"floor\", \"swing_drop\"]).count()\n data = data.reset_index()\n data.head()\n data = data.fillna(0)\n pivot = data.pivot(index=\"floor\", columns=\"swing_drop\", values=\"name\")\n pivot = pivot.fillna(0)\n order = sorted(df.floor.unique(), reverse=True)\n pivot = pivot.reindex(order)\n plt.figure(figsize=page_size)\n ax = sns.heatmap(pivot, cmap=\"BuPu\")\n ax.set_title(\"Block Qty Heatmap\")\n pdf.savefig()\n plt.close()\n\n # bar chart\n plt.rcParams.update({'font.size': 5})\n plt.figure(figsize=page_size)\n plt.title('Block Style Bar Graph')\n plt.xlabel('Names')\n plt.xticks(rotation=90)\n plt.ylabel('Quantities')\n dd = df[['name', \"guid\"]].groupby(\"name\").count()\n dd = dd.reset_index()\n dd = dd.sort_values(\"guid\")\n plt.bar(dd.name, dd.guid)\n # plt.show()\n pdf.savefig()\n plt.close()\n\n # We can also set the file's metadata via the PdfPages object:\n d = pdf.infodict()\n d['Title'] = 'Multipage PDF Example'\n d['Author'] = 'Matthew Kreidler'\n d['Subject'] = 'How to create a multipage pdf file and set its metadata'\n d['Keywords'] = 'PdfPages multipage keywords author title subject'\n d['CreationDate'] = datetime.datetime.today()\n d['ModDate'] = datetime.datetime.today()\n\n print(\"Graphs and Charts finished!\")\n return path",
"def create_gnuplot_statistic(statistic_entries):\n grouped_by_number_of_entries = {}\n for statistic in statistic_entries:\n key = statistic['max entries']\n if key not in grouped_by_number_of_entries:\n grouped_by_number_of_entries[key] = [statistic]\n else:\n grouped_by_number_of_entries[key].append(statistic)\n\n all_plots = multiplot(\"learn.py statistics\", title_font=(\"\", 18), plots_per_row=2)\n\n pos = 0\n max_pos = len(grouped_by_number_of_entries) - 1\n for key, statistic in grouped_by_number_of_entries.items():\n average_time_plot = plot()\n average_time_plot.set_ylabel(\"seconds\")\n if pos == max_pos:\n average_time_plot.set_xlabel(\"n'th test run\")\n average_time_plot.set_xtics(\"1\")\n average_time_plot.set_ytics(\"0.5\")\n average_time_plot.set_line_style(1, \"lc rgb \\\"#00ff00\\\" lw 2\")\n average_time_plot.set_fill_style(1, \"transparent solid 0.4 border\")\n values = list(enumerate([average(entry) for entry in statistic], 1))\n average_time_plot.add_curve(\"average times (max entries=%d)\" % key,\n values=values, mode=plot.FILLEDCURVES)\n\n all_plots.add_plot(average_time_plot)\n\n number_of_tests_plot = plot()\n number_of_tests_plot.set_ylabel(\"# tests\")\n if pos == max_pos:\n number_of_tests_plot.set_xlabel(\"n'th test run\")\n number_of_tests_plot.set_xtics(\"1\")\n number_of_tests_plot.set_ytics(\"1\")\n number_of_tests_plot.set_line_style(1, \"lc rgb \\\"#00ff00\\\" lw 2\")\n number_of_tests_plot.set_fill_style(1, \"transparent solid 0.4 border\")\n values = list(enumerate([entry['correct answers'] + entry['wrong answers']\n for entry in statistic], 1))\n number_of_tests_plot.add_curve(\"# of tests (max entries=%d)\" % key,\n values=values, mode=plot.FILLEDCURVES)\n\n all_plots.add_plot(number_of_tests_plot)\n pos += 1\n\n calculated_height = len(grouped_by_number_of_entries) * 250\n script(\"learn.gp\", all_plots, width=800, height=calculated_height).execute()"
] |
[
"0.62130785",
"0.5782566",
"0.57189924",
"0.56687677",
"0.56392014",
"0.5576962",
"0.5452435",
"0.53628135",
"0.5344447",
"0.53296703",
"0.53150797",
"0.5289431",
"0.5255784",
"0.5237809",
"0.52253234",
"0.5200862",
"0.5194412",
"0.51737",
"0.51735514",
"0.5161112",
"0.5139487",
"0.5131945",
"0.51090276",
"0.5097443",
"0.5080895",
"0.50770587",
"0.50770587",
"0.5063089",
"0.5059632",
"0.5055506"
] |
0.8189606
|
0
|
Constructor creates a straight line with x-coefficient inita, y-coefficient initb, and constant coefficient initc
|
def __init__(self, inita=0, initb=1, initc=0):
self.a = inita
self.b = initb
if self.a == 0 and self.b == 0:
raise Exception("Invalid straight line equation")
self.c = initc
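
A minimal usage sketch of this constructor, assuming it sits in a class (here called StraightLine, a hypothetical name) that models the equation a*x + b*y = c, the form consistent with the slope, yintercept and xintercept methods shown further below:

class StraightLine:
    def __init__(self, inita=0, initb=1, initc=0):
        # a*x + b*y = c; a and b must not both be zero
        self.a = inita
        self.b = initb
        if self.a == 0 and self.b == 0:
            raise Exception("Invalid straight line equation")
        self.c = initc

horizontal = StraightLine(0, 1, 3)   # 0*x + 1*y = 3, i.e. y = 3
vertical = StraightLine(1, 0, 5)     # 1*x + 0*y = 5, i.e. x = 5
# StraightLine(0, 0, 1) would raise Exception("Invalid straight line equation")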
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, c, p1=Point(), p2 = Point()):\n Line.__init__(self, p1, p2)\n self.cnv = c",
"def __init__(self, x, y):\n super().__init__()\n\n # Calculate the quadratic coefficients\n x_sq = [xx**2 for xx in x]\n A = np.vstack([x_sq, x, np.ones(len(x))]).T\n self.a, self.b, self.c = np.linalg.lstsq(A,y,rcond=None)[0]\n \n # Calculate the coefficient of determination\n f = [self.f(xx) for xx in x]\n ssres = ((f - y)**2).sum()\n sstot = ((y - y.mean())**2).sum()\n\n if abs(sstot) < 1e-14:\n # Data points actually formed a horizontal line\n self.rsq = 0.0\n else:\n self.rsq = 1 - ssres / sstot",
"def spline_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tD = d_coefficients(x1,x2,x3,C)\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,C,D)\n\tA = a_coefficients(y1,y2)\n\treturn(A,B,C[:2],D)",
"def __init__(self, x, y):\n x = np.asarray(x)\n y = np.asarray(y)\n x = np.ravel(x)\n y = np.ravel(y)\n if x.shape != y.shape:\n raise ValueError(\"x and y must have the same length\")\n self.x = x\n self.y = y\n \n self.n = int(len(self.x))\n\n self.qb, self.qc = self.qspline_params()\n self.cb, self.cc, self.cd = self.cspline_params()",
"def __init__(self, point1, point2):\n self.point1 = point1\n self.point2 = point2\n self.vertical = False\n self.fixed_x = None\n self.k = None\n self.b = None\n\n # cached angle props\n self.angle = None\n self.angle_cos = None\n self.angle_sin = None\n\n self.set_line_props(point1, point2)",
"def __init__(self):\n\t\tself.theta = 0.8\t\t\t# Theta value, the constant of the line which x+y is.(1.2 is best)\n\t\tself.numberOfInput = 0\t\t# The number of Input\n\t\tself.weight = []\t\t\t# The list of weight.",
"def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)",
"def __init__(self, m=np.random.normal(M_INIT, .25, 1)[0], b=np.random.normal(B_INIT, .25, 1)[0], \\\n\t\t\t\t\tt=np.random.normal(T_INIT, .25, 1)[0], l=L_INIT*np.random.normal(1.0, .25, 1)[0]):\n\t\t\n\t\tself.shape_slope = m\n\t\tself.z_thick = b\n\t\tself.thick = t\n\t\tself.length = l",
"def __init__(self, start, end, oldLine = None):\n self.__start = start\n self.__end = end\n if(self.__start == self.__end):\n \"\"\"\n If a zero length line is created that most likely means there is a\n logic problem somewhere in the program. This does not throw and error\n so that the output can still be examined to help diagnose the problem.\n \"\"\"\n# raise Exception('Zero length line')\n logger.warning('A line was created with no length at: ' + \n str(self.start))\n \"\"\" The Point which is the upper left corner of the line's bounding box \"\"\"\n self.__upperLeft = None\n \"\"\" The Point of the lower right corner of the bounding box. \"\"\"\n self.__lowerRight = None\n self.__extrusionRate = 0\n self.freezeExRate = False\n if not(oldLine is None):\n self.__extrusionRate = oldLine.extrusionRate\n self.freezeExRate = oldLine.freezeExRate\n self.vector = np.array([self.end.x-self.start.x,\n self.end.y-self.start.y])",
"def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad",
"def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad",
"def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad",
"def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad",
"def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad",
"def __init__(self, wavelength):\n # store experimental data\n self.x = wavelength\n\n # Central wavelengths of the lines are known constants:\n self.c1 = 422.\n self.c2 = 428.",
"def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')",
"def __init__(self, slope):\n self.slope = slope",
"def _line(x,m,c):\n return m*x+c",
"def _line(x,m,c):\n return m*x+c",
"def line(intercept, slope, x):\n return slope*x + intercept",
"def __init__(self, vertices, **kwargs):\n super(Line, self).__init__(vertices, **kwargs)\n self._geotype = \"Line\"\n return",
"def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)",
"def __init__(self, a, b, c):\r\n if a == 0: \r\n raise ValueError(\"Coefficient 'a' cannot be 0 in a quadratic equation.\")\r\n else:\r\n self.__a = float(a)\r\n self.__b = float(b)\r\n self.__c = float(c)",
"def __init__(self,x,y, alpha = 0):\n self.x = x\n self.y = y\n self.alpha = alpha\n if len(x) != len(y): raise LRDataException(\"Lengths of input and response don't match\") \n if len(x) == 0: raise LRDataException(\"Data set is empty\")\n # Precalculate {y_i*x_ij} for all j\n self.xy = x*y[:,None]",
"def getLine(self, **kwargs):\n return Line(self.p1, self.angle, **kwargs)",
"def abline(slope, intercept, a, b):\n # axes = plt.gca()\n print(slope)\n print(intercept)\n x_vals = np.array(list_xs[ a: b])\n y_vals = intercept + slope * (x_vals-a)\n plt.plot(x_vals, y_vals, '--')\n # print(x_vals)",
"def __init__(self, controlPoints=None):\n super(CatmullRomSpline, self).__init__(controlPoints)",
"def line(self, x0, y0, x1, y1, char): # noqa: C901, PLR0912\n # pylint: disable=too-many-arguments, too-many-branches\n if x0 > x1:\n x1, x0 = x0, x1\n y1, y0 = y0, y1\n\n dx = x1 - x0\n dy = y1 - y0\n\n if dx == 0 and dy == 0:\n self.point(x0, y0, char)\n elif abs(dx) >= abs(dy):\n for x in range(x0, x1 + 1):\n if dx == 0:\n y = y0\n else:\n y = y0 + int(round((x - x0) * dy / float(dx)))\n self.point(x, y, char)\n elif y0 < y1:\n for y in range(y0, y1 + 1):\n if dy == 0:\n x = x0\n else:\n x = x0 + int(round((y - y0) * dx / float(dy)))\n self.point(x, y, char)\n else:\n for y in range(y1, y0 + 1):\n if dy == 0:\n x = x0\n else:\n x = x1 + int(round((y - y1) * dx / float(dy)))\n self.point(x, y, char)",
"def __init__(self, x0, y0, x1, y1, r):\n\n self._x0 = x0\n self._y0 = y0\n self._x1 = x1\n self._y1 = y1\n self._rsquared = r * r",
"def __init__(self, polyhedron, data):\n super(Line, self).__init__(polyhedron, data)"
] |
[
"0.692951",
"0.626148",
"0.61403966",
"0.60645384",
"0.60610247",
"0.60569686",
"0.60559434",
"0.5993065",
"0.5984651",
"0.5981468",
"0.5981468",
"0.5981468",
"0.5981468",
"0.5981468",
"0.59266686",
"0.5915448",
"0.59000385",
"0.5891752",
"0.5891752",
"0.58851874",
"0.5878546",
"0.582182",
"0.58170766",
"0.5798756",
"0.5776754",
"0.5774309",
"0.57395536",
"0.5727669",
"0.5718756",
"0.571842"
] |
0.76026696
|
0
|
Return the slope of the straight line. If the line is vertical, None is returned.
|
def slope(self):
if self.b == 0:
return None
else:
return (-1) * self.a/self.b
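
A quick worked check, reusing the hypothetical StraightLine sketch given after the constructor above with this method attached: for the line 2*x + 1*y = 4 the slope is (-1) * a/b = -2.0, while a vertical line has b == 0 and yields None:

StraightLine(2, 1, 4).slope()    # -> -2.0
StraightLine(1, 0, 5).slope()    # vertical line x = 5 -> None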
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_segment_slope(segment: Tuple[Point]):\n return (\n (segment[0].y - segment[1].y) / (segment[0].x - segment[1].x)\n if (segment[0].x - segment[1].x) != 0\n else float(\"inf\")\n )",
"def slope(start, end):\n\tx1 = start[0]\n\ty1 = start[1]\n\tx2 = end[0]\n\ty2 = end[1]\n\ttop = float(y2 - y1) \n\tbot = float(x2 - x1)\n\tif bot == 0:\n\t\treturn None\n\telse:\n\t\treturn top / bot",
"def _get_slope(x, y):\n slope = linregress(x, y)\n return slope",
"def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])",
"def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")",
"def slope_from_origin(self):\n\n return (self.y / self.x)",
"def get_slope(self) -> str:\n return self.query('slope,?')",
"def slope_from_origin(self):\n\n return self.y / self.x",
"def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))",
"def line(intercept, slope, x):\n return slope*x + intercept",
"def calc_slope(self, left, right):\n return (left[1] - right[1]) / (left[0] - right[0])",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def slope_from_origin(self):\n return round(math.degrees(abs(math.atan(self.y/self.x))), 2)",
"def _slope(A, B):\n if (B[0] - A[0]) == 0:\n return \"vertical\"\n slope = (B[1] - A[1]) / (B[0] - A[0])\n if slope == 0:\n return \"horizontal\"\n elif slope > 0:\n return \"inclined\"\n else:\n return \"declined\"",
"def slope(self, x1, y1, x2, y2):\n if x1 == x2:\n slope = np.inf\n else:\n slope = (y2-y1)/(x2-x1)\n\n return np.math.atan(slope)",
"def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()",
"def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)",
"def _regression_slope_metric(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.slope",
"def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx",
"def compute_slope(x_jnt_0, y_jnt_0, x_jnt_1, y_jnt_1):\n if x_jnt_0 == x_jnt_1:\n return None\n return (y_jnt_1 - y_jnt_0) / (x_jnt_1 - x_jnt_0)",
"def slope(x1, y1, x2, y2):\r\n delta_y = y2-y1\r\n delta_x = x2-x1\r\n return delta_y / delta_x",
"def slope(slope:float, offset=0., bounds: tuple[float, float] = None) -> core.Slope:\n return core.Slope(slope, offset, bounds=bounds)",
"def drawSlope(self):\n length = sqrt(1 + self.slope**2) # Length of the line segment over 1 x-unit\n xOffset = (segmentLength / length) / 2 # Figures out how many times the length of the 1 unit length fits into the desired length\n # then divides by 2 becuase half is on the left and half on the right of the center\n\n\n # Left end point\n xLeft = self.x - xOffset\n yLeft = (self.slope * (xLeft - self.x)) + self.y\n\n # Right end point\n xRight = self.x + xOffset\n yRight = (self.slope * (xRight - self.x)) + self.y\n\n\n # Converts the left and right end points from cartesian coordinates to screen coordinates\n left = cartesianToScreen(xLeft , yLeft)\n right = cartesianToScreen(xRight, yRight)\n\n\n pygame.draw.aaline(display, self.color, left, right, 1) # DRAWS THE LINE AHHHHHHHHHHHHHHHHHH :P",
"def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]",
"def get_slope(x, y, deg=1, err=[]):\n inverse_error = []\n for i in err:\n inv = 1/i\n inverse_error.append(i)\n\n if len(err)>0:\n z = np.polyfit(x, y, deg, w=inverse_error)\n else:\n z = np.polyfit(x, y, deg)\n\n m, b = z\n p = np.poly1d(z)\n\n return m, b, p",
"def linear_slope(self, dim=\"time\", nan_policy=\"none\"):\n return linear_slope(self._obj, dim=dim, nan_policy=nan_policy)",
"def get_vertical_line(self, point: Sequence[float], **kwargs) -> Line:\n return self.get_line_from_axis_to_point(0, point, **kwargs)",
"def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))",
"def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:\n\n return self.get_line_from_axis_to_point(1, point, **kwargs)",
"def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)"
] |
[
"0.7364191",
"0.7166937",
"0.7103884",
"0.7045604",
"0.7019451",
"0.69471735",
"0.692469",
"0.6909021",
"0.6900345",
"0.6852268",
"0.6656526",
"0.66505015",
"0.66357017",
"0.6598907",
"0.65987",
"0.6584361",
"0.65671134",
"0.6559925",
"0.6549819",
"0.6510047",
"0.6503251",
"0.64857435",
"0.64150614",
"0.64096564",
"0.6332866",
"0.63245976",
"0.629552",
"0.62313914",
"0.612866",
"0.61255956"
] |
0.74592346
|
0
|
Return the yintercept of the straight line. If the line is vertical, None is returned
|
def yintercept(self):
if self.slope() is None:
return None
else:
return self.c/self.b
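
Worked check under the same a*x + b*y = c assumption (hypothetical StraightLine sketch as above): for 2*x + 1*y = 4 the slope is not None, so the method returns c/b = 4/1 = 4.0; for the vertical line 1*x + 0*y = 5 the slope is None and the method returns None, which also avoids dividing by b == 0:

StraightLine(2, 1, 4).yintercept()    # -> 4.0
StraightLine(1, 0, 5).yintercept()    # -> None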
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_x_y_for_line(bounds, y_intercept, slope): \n\n x = np.sort(bounds)\n\n y = y_intercept + (slope * x)\n\n return x, y",
"def get_line_end_pts(line_segment, y1, y2):\n if line_segment is None:\n return None\n\n slope, intercept = line_segment\n\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return x1, y1, x2, y2",
"def _hLine(self, y):\n left, _top, width, _height = self.plot.getPlotBoundsInPixels()\n\n dataPos1 = self.plot.pixelToData(left, y, check=False)\n dataPos2 = self.plot.pixelToData(left + width, y, check=False)\n return dataPos1, dataPos2",
"def yline(y,farright, width, dash, grayamount):\r\n aline([[0,y],[farright,y]],width, dash, grayamount)",
"def get_vertical_line(self, point: Sequence[float], **kwargs) -> Line:\n return self.get_line_from_axis_to_point(0, point, **kwargs)",
"def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. \n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)",
"def get_y_position(self):\n return self.actual_coordinates[1]",
"def line(intercept, slope, x):\n return slope*x + intercept",
"def get_y(self):\n return self.coords[1]",
"def get_y(self, x):\n p, y = self.get_p_y(x)\n return y",
"def get_y_coordinate(height, rank):\n # Divided the line chart frame by MAX_RANK vertically and equally AND get y by the current rank.\n if rank > MAX_RANK:\n # Set y as the bottom frame line when the current rank is over MAX_RANK.\n y = height - GRAPH_MARGIN_SIZE\n else:\n y = (height - GRAPH_MARGIN_SIZE * 2) / MAX_RANK * rank + GRAPH_MARGIN_SIZE\n return y",
"def __get_y__(self):\n return self.Direction['y']",
"def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()",
"def findY(self):\n return self.y",
"def is_point_on_same_line(self, x, y = None):\n x, y = y is not None and (x, y) or (x[0], x[1])\n\n if self.is_vertical():\n return Point(x, 0) == Point(self.x_value, 0)\n else:\n return Point(0, y) == Point(0, (self.slope * x + self.y_intercept))",
"def y(self) -> int:\n return self.data.y_centre >> 4",
"def y(self):\n return self.coords[1]",
"def y(self):\n if self._y is None:\n self.compute_coordinates()\n return self._y",
"def vertical_line(numbers, p_current, relative = False):\n if len(numbers) != 1:\n return None\n\n if relative:\n p_next = Point(p_current.x, numbers[0] + p_current.y)\n else:\n p_next = Point(p_current.x, numbers[0])\n\n return Line(p_current, p_next)",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def origin_y(self):\n return self._origin[1]",
"def y(self):\n return self._coords[1]",
"def getY(self):\n y = self.getAttribute('y')\n kind = self.getKind()\n self._y = y if kind == 'pie' else None\n return self._y",
"def Find_Line_By_XY( self, x, y ):\r\n for i in self.handle_list:\r\n #examine the bounding box of each line\r\n bbox = self.canvas_one.bbox( i.line_handle )\r\n xb1 = bbox[ 0 ]\r\n yb = ( bbox[ 1 ] + bbox[ 3 ] ) / 2\r\n xb2 = bbox[ 2 ]\r\n if x >= xb1 and x <= xb2 and abs( y-yb ) <= cb.ytick / 2:\r\n #found, return handle\r\n return i\r\n #not found return -1\r\n return -1",
"def is_vertical(self):\n return self.slope == float(\"+inf\")",
"def xintercept(self):\n if self.slope() == 0:\n return None\n else:\n return self.c/self.a",
"def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)",
"def get_walking_line(self):\n\t\treturn self._bottom_rect.move(0,1)",
"def get_y(self):\n return self.__y",
"def getHeight(self, point):\n return Line(point, self.normal_vector.angle)"
] |
[
"0.6660918",
"0.6624468",
"0.6600898",
"0.6539037",
"0.6424764",
"0.6338208",
"0.5991932",
"0.5986083",
"0.5951831",
"0.5908803",
"0.5907358",
"0.5875608",
"0.5825497",
"0.5823314",
"0.5785119",
"0.57730496",
"0.57594156",
"0.57528734",
"0.5750194",
"0.57497287",
"0.57426494",
"0.57189596",
"0.57174563",
"0.57165897",
"0.5714905",
"0.5702758",
"0.57027423",
"0.56829137",
"0.5680786",
"0.56771946"
] |
0.75867695
|
0
|
Return the xintercept of the straight line. If the line is horizontal, None is returned
|
def xintercept(self):
if self.slope() == 0:
return None
else:
return self.c/self.a
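
Same kind of worked check with the hypothetical StraightLine sketch: for 2*x + 1*y = 4 the method returns c/a = 4/2 = 2.0, while a horizontal line such as 0*x + 1*y = 3 has slope 0 and returns None, which also avoids dividing by a == 0:

StraightLine(2, 1, 4).xintercept()    # -> 2.0
StraightLine(0, 1, 3).xintercept()    # horizontal line y = 3 -> None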
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def line(intercept, slope, x):\n return slope*x + intercept",
"def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:\n\n return self.get_line_from_axis_to_point(1, point, **kwargs)",
"def begining_of_line():\r\n set_point(point().begining_of_line())",
"def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. \n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)",
"def get_x_y_for_line(bounds, y_intercept, slope): \n\n x = np.sort(bounds)\n\n y = y_intercept + (slope * x)\n\n return x, y",
"def _hLine(self, y):\n left, _top, width, _height = self.plot.getPlotBoundsInPixels()\n\n dataPos1 = self.plot.pixelToData(left, y, check=False)\n dataPos2 = self.plot.pixelToData(left + width, y, check=False)\n return dataPos1, dataPos2",
"def yintercept(self):\n if self.slope() is None:\n return None\n else:\n return self.c/self.b",
"def get_line_start(self):\n return self._line_start",
"def line(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"line\", x=x, y=y, **kwargs)",
"def get_line_nr(view, point):\n return view.rowcol(point)[0] + 1",
"def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()",
"def _get_line(self, line: int) -> str:\n line_offsets_with_sentinel = self._line_offsets + [len(self._text)]\n return self._text[line_offsets_with_sentinel[line]:line_offsets_with_sentinel[line+1]]",
"def mid(self, line):\n return [(line.x1 + line.x2) // 2, (line.y1 + line.y2) // 2]",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def _line(x,m,c):\n return m*x+c",
"def _line(x,m,c):\n return m*x+c",
"def get_startline(self):\n return self.get_attribute(\"startline\")",
"def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)",
"def getXPos(self, tline, x):\n # Find the TWord object\n words = tline.twords\n j = 0 # word index\n imax = len(words) - 1\n for w in words:\n # Find out if the point is in this word -\n # need to include half the space width after the word, if there\n # is a following word.\n x0 = w.getX()\n x1 = x0 + w.getWidth()\n if (j == imax): break\n x2 = words[j+1].getX()\n spw = (x2 - x1)/2\n if (x < x1 + spw): break\n j += 1\n\n word = words[j]\n\n # Then the character\n xvec = word.getOffsets()\n\n k = 0\n if xvec:\n xo = x - x0 # xo is x relative to word start\n p0 = 0.0\n for p in xvec:\n p1 = xvec[k]\n if (xo < (p0 + p1)/2): break\n k += 1\n p0 = p1\n\n return (word, k)",
"def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)",
"def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)",
"def add_intercept(self, x):\n\t\tif len(x) < 1 or type(x) is not np.ndarray:\n\t\t\treturn None\n\t\treturn np.c_[np.ones(x.shape[0]), x]",
"def is_point_on_same_line(self, x, y = None):\n x, y = y is not None and (x, y) or (x[0], x[1])\n\n if self.is_vertical():\n return Point(x, 0) == Point(self.x_value, 0)\n else:\n return Point(0, y) == Point(0, (self.slope * x + self.y_intercept))",
"def get_lx(self):\r\n return int(self.dx * self.nx - self.ox)",
"def get_x(self):\n return self.coords[0]",
"def get_fit_x(self, y):\n if self.line_fit_m.size == 0:\n return np.empty(y.shape)\n fit = self.line_fit\n return np.array(fit[0] * y ** 2 + fit[1] * y + fit[2]).astype(\"int\")",
"def get_x_position(self):\n return self.actual_coordinates[0]",
"def _vLine(self, x):\n _left, top, _width, height = self.plot.getPlotBoundsInPixels()\n\n dataPos1 = self.plot.pixelToData(x, top, check=False)\n dataPos2 = self.plot.pixelToData(x, top + height, check=False)\n return dataPos1, dataPos2",
"def line(self):\n if self.__line is None:\n left = self.__source.rfind(\"\\n\", 0, self.__offset) + 1\n right = self.__source.find(\"\\n\", self.__offset)\n\n self.__line = self.__source[left : right]\n self.__lineOffset = self.__offset - left\n\n return self.__line",
"def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start"
] |
[
"0.64042056",
"0.6259456",
"0.6209308",
"0.6192073",
"0.6158778",
"0.60954744",
"0.60718787",
"0.5957172",
"0.5929775",
"0.5878907",
"0.58247787",
"0.58001995",
"0.5797645",
"0.57681197",
"0.57325584",
"0.57325584",
"0.5690906",
"0.568494",
"0.56811166",
"0.56772906",
"0.5662098",
"0.5659002",
"0.56569993",
"0.56501466",
"0.56454957",
"0.5631174",
"0.5626848",
"0.5624125",
"0.5617648",
"0.56129587"
] |
0.7362088
|
0
|
Return True if L is parallel to the straight line, and False otherwise
|
def parallel(self, L):
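    # slope() is None for vertical lines, so two verticals compare equal and are treated as parallel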
return self.slope() == L.slope()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def perpendicular(self, L):\n if self.slope() is None: # if the line is vertical, L must be horizontal\n return L.slope() == 0\n elif self.slope() == 0: # if the line is horizontal, L must be vertical\n return L.slope() is None\n else:\n return self.slope() * L.slope() == -1",
"def isOnLine(self, point):\n if((point < self.start and point < self.end) or (\n point > self.start and point > self.end)):\n return False #point is not between the start and end of self\n \n if(self.getArea(self.start, self.end, point) > c.EPSILON):\n return False #points are not co-linear\n \n return True",
"def areParallel(self, line):\n # A vector perpendicular to line1\n perpVect = np.array([-self.vector[c.Y], self.vector[c.X]])\n # Farin-Hansford eq 3.14\n cosTheda = (np.dot(perpVect, line.vector)/\n (np.linalg.norm(perpVect)*np.linalg.norm(line.vector)))\n # if cosTheda is < c.EPSILON then the lines are parallel and we return True\n return abs(cosTheda) < c.EPSILON",
"def is_straight_line(self, arr):\n # First pair of point (x0, y0) \n x0 = arr[0][0]\n y0 = arr[0][1]\n\n # Second pair of point (x1, y1) \n x1 = arr[len(arr) - 1][0]\n y1 = arr[len(arr) - 1][1]\n\n dx = x1 - x0\n dy = y1 - y0\n\n # Loop to iterate over the points \n for i in range(len(arr)):\n x = arr[i][0]\n y = arr[i][1]\n\n if (dx * (y - y1) - dy * (x - x1)) > self.movement_tolerance:\n return False\n\n return True",
"def ll(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('ll() expects two lines')\n return L1.normal_vector() ** L2.normal_vector() == 0",
"def isinsidelineXY(l,p):\n\n return linePointXY(l,p,distance=True) < epsilon",
"def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])",
"def _l_(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('_l_() expects two lines')\n return L1.normal_vec() * L2.normal_vec() == 0",
"def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]",
"def checkStraightLine(coordinates: List[List[int]]) -> bool:\n\t# initializing our comparison slope value\n\tnum = coordinates[1][1] - coordinates[0][1]\n\tden = coordinates[1][0] - coordinates[0][0]\n\tif den == 0:\n\t\tslope = math.inf\n\telse:\n\t\tslope = num / den\n\n\t# checking the initial slope against all other slopes\n\tslope_check = 0\n\tfor i in range(2, len(coordinates)):\n\t\tnum = coordinates[i][1] - coordinates[i-1][1]\n\t\tden = coordinates[i][0] - coordinates[i-1][0]\n\t\tif den == 0:\n\t\t\tslope_check = math.inf\n\t\telse:\n\t\t\tslope_check = num/den\n\n\t\tif slope_check != slope:\n\t\t\treturn False\n\n\treturn True",
"def in_line(pi, pj, pk):\n # compute cross product\n dxc = pk.x - pi.x;\n dyc = pk.y - pi.y;\n\n dxl = pj.x - pi.x;\n dyl = pj.y - pi.y;\n\n cross = dxc * dyl - dyc * dxl;\n\n return True if cross == 0 else False",
"def is_line_on_line(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n line_on_line = False\n\n for coords in feature_1:\n\n line_on_line = boolean_point_on_line(coords, feature_2)\n if not line_on_line:\n break\n\n return line_on_line",
"def _parallel(*segments):\n if not all(isinstance(s, Line) for s in segments):\n raise TypeError(\"Line._parallel requires all Line objects\")\n\n unique_segments = list(set(segments))\n\n if len(unique_segments) == 0:\n return False\n elif len(unique_segments) == 1:\n return True\n else:\n # take the first segment and translate it to the origin\n first_translated_seg = Line([Point3(0, 0, 0), (segments[0].end - segments[0].start)])\n\n # the given segments are parallel if they are all parallel to the first\n for s in segments[1:]:\n translated_seg = Line([Point3(0, 0, 0), (s.end - s.start)])\n if not first_translated_seg.is_collinear_with(translated_seg):\n return False\n\n return True",
"def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])",
"def is_on_line(p0, p1, p2, threshold = 0.01):\n p0, p1, p2 = map(lambda tup : np.array(tup[:2]), [p0, p1, p2])\n p1 -= p0\n p2 -= p0\n return abs((p1[0] / p1[1]) - (p2[0] / p2[1])) < threshold",
"def isPointOnLine(node1, node2, point):\n m, b, d = geometry.lineSpec(node1, node2)\n if d == -1: # if two nodes are the same\n if node1 == point:\n return True\n else:\n return False\n else:\n if m == True: # parallel to y axis\n if point[0] == b and \\\n (((node1[1] <= point[1]) and (point[1] <= node2[1])) or\\\n ((node2[1] <= point[1]) and (point[1] <= node1[1]))):\n return True\n else:\n return False\n \n elif m == False:\n if point[1] == b and \\\n (((node1[0] <= point[0]) and (point[0] <= node2[0])) or\\\n ((node2[0] <= point[0]) and (point[0] <= node1[0]))):\n return True\n else:\n return False\n \n else:\n if(abs(point[1] - (m*point[0] + b)) < 0.05) and \\\n (((node1[0] <= point[0]) and (point[0] <= node2[0])) or\\\n ((node2[0] <= point[0]) and (point[0] <= node1[0]))) and\\\n (((node1[1] <= point[1]) and (point[1] <= node2[1])) or\\\n ((node2[1] <= point[1]) and (point[1] <= node1[1]))):\n return True\n else:\n return False",
"def belongsToLine(self, index, direction, line): \n first_point = 1 \n if direction == 'Horizontal': # Check if index's y coordinate is the same as line's first point\n if self.getCoordinates(index)[1] == self.getCoordinates(line[first_point])[1]:\n return True\n elif direction == 'Vertical': # Check if index's x coordinate is the same as line's first point\n if self.getCoordinates(index)[0] == self.getCoordinates(line[first_point])[0]:\n return True\n else:\n x, y = self.getCoordinates(index)\n\n if direction == 'D-pos' and x == y: # points in positive diagonal have equal x and y coordinates\n return True\n if direction == 'D-neg' and x + y == self.size - 1: # some of coordinates negative diagonal point is n -1\n return True\n return False",
"def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0",
"def is_point_on_polyline(point, polyline, tol=0.0):\n for i in xrange(len(polyline) - 1):\n a = polyline[i]\n b = polyline[i + 1]\n c = closest_point_on_segment(point, (a, b))\n if distance_point_point(point, c) <= tol:\n return True\n return False",
"def is_intersection_line_line(ab, cd, epsilon=1e-6):\n a, b = ab\n c, d = cd\n\n line_vector_1 = normalize_vector(vector_from_points(a, b))\n line_vector_2 = normalize_vector(vector_from_points(c, d))\n # check for parallel lines\n print(abs(dot_vectors(line_vector_1, line_vector_2)))\n if abs(dot_vectors(line_vector_1, line_vector_2)) > 1.0 - epsilon:\n return False\n # check for intersection\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n if dot_vectors(d_vector, subtract_vectors(c, a)) == 0:\n return True\n return False",
"def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t",
"def is_point_on_same_line(self, x, y = None):\n x, y = y is not None and (x, y) or (x[0], x[1])\n\n if self.is_vertical():\n return Point(x, 0) == Point(self.x_value, 0)\n else:\n return Point(0, y) == Point(0, (self.slope * x + self.y_intercept))",
"def point_on_line(point:tuple, line:tuple, d_y:float, d_x:float, b:float)->bool:\n if not near_segment(point, line):\n # Fast fail to handle cases where the point isn't in the bounding rectangle of the line segment.\n return False\n if b == None and point[0] == line[0][0]:\n return True\n return d_y * point[0] == (point[1] - b) * d_x",
"def intersect_ext(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0, 0\n dp = line.p - self.p\n c2 = self.cross_z\n u = c.dot(dp) / d\n v = c2.dot(dp) / d\n return u > 0 and v > 0 and u < 1 and v < 1, self.lerp(u), u, v",
"def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b",
"def is_horizontal(line:tuple)->bool:\n return line[0][1] == line[1][1]",
"def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n",
"def line_ccw(a, b, p):\n return (p[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (p[0] - a[0])",
"def isAnyLineAt(self, x, y):\n return (self.isLineAt(x, y, 1, 0) or # Horizontal\n self.isLineAt(x, y, 0, 1) or # Vertical\n self.isLineAt(x, y, 1, 1) or # Diagonal up\n self.isLineAt(x, y, 1, -1)) # Diagonal down",
"def is_librating(triple):\n return triple.CKL <= 0"
] |
[
"0.7166783",
"0.7142611",
"0.6976377",
"0.69568163",
"0.6914358",
"0.68621224",
"0.68530166",
"0.68178743",
"0.6817851",
"0.6717622",
"0.6670572",
"0.6657986",
"0.66080886",
"0.6605518",
"0.657581",
"0.65120167",
"0.6476982",
"0.64696604",
"0.6432958",
"0.64147866",
"0.63802195",
"0.63041043",
"0.629616",
"0.6224388",
"0.6198995",
"0.6184114",
"0.6177646",
"0.6161296",
"0.6146759",
"0.6144351"
] |
0.78763753
|
0
|
Return True if L is perpendicular to the straight line, and False otherwise
|
def perpendicular(self, L):
if self.slope() is None: # if the line is vertical, L must be horizontal
return L.slope() == 0
elif self.slope() == 0: # if the line is horizontal, L must be vertical
return L.slope() is None
else:
return self.slope() * L.slope() == -1
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_perpendicular_to(self, vector):\n\n if abs(self.dot(vector)) < 0.01:\n return True\n return False",
"def ll(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('ll() expects two lines')\n return L1.normal_vector() ** L2.normal_vector() == 0",
"def _l_(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('_l_() expects two lines')\n return L1.normal_vec() * L2.normal_vec() == 0",
"def isinsidelineXY(l,p):\n\n return linePointXY(l,p,distance=True) < epsilon",
"def parallel(self, L):\n return self.slope() == L.slope()",
"def isOnLine(self, point):\n if((point < self.start and point < self.end) or (\n point > self.start and point > self.end)):\n return False #point is not between the start and end of self\n \n if(self.getArea(self.start, self.end, point) > c.EPSILON):\n return False #points are not co-linear\n \n return True",
"def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]",
"def is_straight_line(self, arr):\n # First pair of point (x0, y0) \n x0 = arr[0][0]\n y0 = arr[0][1]\n\n # Second pair of point (x1, y1) \n x1 = arr[len(arr) - 1][0]\n y1 = arr[len(arr) - 1][1]\n\n dx = x1 - x0\n dy = y1 - y0\n\n # Loop to iterate over the points \n for i in range(len(arr)):\n x = arr[i][0]\n y = arr[i][1]\n\n if (dx * (y - y1) - dy * (x - x1)) > self.movement_tolerance:\n return False\n\n return True",
"def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0",
"def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])",
"def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b",
"def areParallel(self, line):\n # A vector perpendicular to line1\n perpVect = np.array([-self.vector[c.Y], self.vector[c.X]])\n # Farin-Hansford eq 3.14\n cosTheda = (np.dot(perpVect, line.vector)/\n (np.linalg.norm(perpVect)*np.linalg.norm(line.vector)))\n # if cosTheda is < c.EPSILON then the lines are parallel and we return True\n return abs(cosTheda) < c.EPSILON",
"def distancetoline(p, l1, l2):\n vx = l1.x-p.x \n vy = l1.y-p.y\n ux = l2.x-l1.x\n uy = l2.y-l1.y\n\n length = ux*ux+uy*uy;\n\n det = (-vx*ux)+(-vy*uy); \n # if this is < 0 or > length then its outside the line segment\n if det<0 or det>length:\n ux=l2.x-p.x\n uy=l2.y-p.y\n return sqrt(min(vx*vx+vy*vy, ux*ux+uy*uy))\n\n det = ux*vy-uy*vx\n if length == 0.0:\n return 0.0\n else:\n return sqrt((det*det)/length)",
"def checkStraightLine(coordinates: List[List[int]]) -> bool:\n\t# initializing our comparison slope value\n\tnum = coordinates[1][1] - coordinates[0][1]\n\tden = coordinates[1][0] - coordinates[0][0]\n\tif den == 0:\n\t\tslope = math.inf\n\telse:\n\t\tslope = num / den\n\n\t# checking the initial slope against all other slopes\n\tslope_check = 0\n\tfor i in range(2, len(coordinates)):\n\t\tnum = coordinates[i][1] - coordinates[i-1][1]\n\t\tden = coordinates[i][0] - coordinates[i-1][0]\n\t\tif den == 0:\n\t\t\tslope_check = math.inf\n\t\telse:\n\t\t\tslope_check = num/den\n\n\t\tif slope_check != slope:\n\t\t\treturn False\n\n\treturn True",
"def Perpendicular(self, line: Line, point: Point, interesting=True) -> Line:\n if point in line:\n return self.ErectPerpendicular(line, point, interesting=interesting)\n else:\n return self.DropPerpendicular(line, point, interesting=interesting)",
"def is_point_on_same_line(self, x, y = None):\n x, y = y is not None and (x, y) or (x[0], x[1])\n\n if self.is_vertical():\n return Point(x, 0) == Point(self.x_value, 0)\n else:\n return Point(0, y) == Point(0, (self.slope * x + self.y_intercept))",
"def in_line(pi, pj, pk):\n # compute cross product\n dxc = pk.x - pi.x;\n dyc = pk.y - pi.y;\n\n dxl = pj.x - pi.x;\n dyl = pj.y - pi.y;\n\n cross = dxc * dyl - dyc * dxl;\n\n return True if cross == 0 else False",
"def side_points(p, v, L): \r\n u = np.array([-v[1], v[0]]) # positive normal of v:\r\n N = list() # list of points on one side of the line p,v:\r\n for k in range(len(L)):\r\n if (L[k] - p).dot(u) >= 0:\r\n N.append(L[k])\r\n \r\n return N",
"def is_point_on_line(point, line, tol=0.0):\n d = distance_point_line(point, line)\n return d <= tol",
"def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t",
"def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])",
"def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)",
"def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)",
"def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n",
"def belongsToLine(self, index, direction, line): \n first_point = 1 \n if direction == 'Horizontal': # Check if index's y coordinate is the same as line's first point\n if self.getCoordinates(index)[1] == self.getCoordinates(line[first_point])[1]:\n return True\n elif direction == 'Vertical': # Check if index's x coordinate is the same as line's first point\n if self.getCoordinates(index)[0] == self.getCoordinates(line[first_point])[0]:\n return True\n else:\n x, y = self.getCoordinates(index)\n\n if direction == 'D-pos' and x == y: # points in positive diagonal have equal x and y coordinates\n return True\n if direction == 'D-neg' and x + y == self.size - 1: # some of coordinates negative diagonal point is n -1\n return True\n return False",
"def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)",
"def is_point_on_polyline(point, polyline, tol=0.0):\n for i in xrange(len(polyline) - 1):\n a = polyline[i]\n b = polyline[i + 1]\n c = closest_point_on_segment(point, (a, b))\n if distance_point_point(point, c) <= tol:\n return True\n return False",
"def is_intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n return True\n return False",
"def line_ccw(a, b, p):\n return (p[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (p[0] - a[0])",
"def point_on_line(point:tuple, line:tuple, d_y:float, d_x:float, b:float)->bool:\n if not near_segment(point, line):\n # Fast fail to handle cases where the point isn't in the bounding rectangle of the line segment.\n return False\n if b == None and point[0] == line[0][0]:\n return True\n return d_y * point[0] == (point[1] - b) * d_x"
] |
[
"0.7048503",
"0.7046053",
"0.6906444",
"0.68979657",
"0.68731594",
"0.67885673",
"0.67519236",
"0.6713218",
"0.66297054",
"0.65473384",
"0.6544193",
"0.6527107",
"0.6505562",
"0.64810836",
"0.6360246",
"0.63600093",
"0.6325344",
"0.6318955",
"0.6314899",
"0.6292119",
"0.6279408",
"0.6273495",
"0.6273495",
"0.62479424",
"0.62439096",
"0.62426406",
"0.6221961",
"0.62062526",
"0.6202249",
"0.61982936"
] |
0.86484283
|
0
|
Return the intersection point (a 2-tuple) of L with the straight line. If the line is parallel to L, None is returned.
|
def intersection(self, L):
if self.slope() == L.slope():
return None
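    # Lines are in a*x + b*y = c form; solve the two equations simultaneously (Cramer's rule)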
intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)
intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)
return (intpt_xcood, intpt_ycood)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None",
"def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)",
"def intersection(self, line):\n\t\tdenom = (line.b[1]-line.a[1])*(self.b[0]-self.a[0]) - (line.b[0]-line.a[0])*(self.b[1]-self.a[1])\n\t\t# denominator is 0 if lines are parallel\n\t\tif denom == 0:\n\t\t\treturn None\n\t\t\n\t\tnum_a = (line.b[0]-line.a[0])*(self.a[1]-line.a[1]) - (line.b[1]-line.a[1])*(self.a[0]-line.a[0])\n\t\tnum_b = (self.b[0]-self.a[0])*(self.a[1]-line.a[1]) - (self.b[1]-self.a[1])*(self.a[0]-line.a[0])\n\t\t# if both numerators are 0 then lines are coincident\n\t\tif num_a==0 and num_b==0:\n\t\t\treturn None\n\t\t\t\n\t\tu_a = num_a/denom\n\t\tu_b = num_b/denom\n\t\t\t\n\t\tif 0 <= u_a <= 1 and 0 <= u_b <= 1:\n\t\t\treturn self.a + uA*(self.b-self.a)\n\t\telse:\n\t\t\treturn None",
"def intersection(self, l):\n closest = self.closest_point(l)\n return closest if l.contains(closest) else None",
"def LineLineIntersection(lineA, lineB):\n lineA = rhutil.coerceline(lineA, True)\n lineB = rhutil.coerceline(lineB, True)\n rc, a, b = Rhino.Geometry.Intersect.Intersection.LineLine(lineA, lineB)\n if not rc: return None\n return lineA.PointAt(a), lineB.PointAt(b)",
"def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b",
"def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)",
"def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]",
"def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result",
"def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n",
"def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None",
"def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled",
"def _intersection_homogenous(homog_line_0, homog_line_1):\n # NB: renamed from '_intersection'\n eps = 1e-13\n a,b,c=homog_line_0\n u,v,w=homog_line_1\n D=float(b*u-v*a)\n if abs(D)<eps:\n # parallel lines\n return None, None\n xp=-(w*b-c*v)/D\n yp= (w*a-c*u)/D\n\n return xp, yp",
"def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)",
"def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y",
"def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P",
"def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None",
"def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()",
"def intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n else:\n return None",
"def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}",
"def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)",
"def crossLine(self, other):\n if self.parallel(other): return None\n line = self.getLine()\n point = other.crossLine(line)\n if point is not None:\n if point in self and point in other:\n return point",
"def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)",
"def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)",
"def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)",
"def intersection(self, segment):\n intersection = self.hyperplane.intersection(segment)\n if intersection is not None and np.linalg.norm(intersection - self.closest_point_to(intersection)) < epsilon:\n return intersection\n\n return None",
"def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)",
"def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)",
"def intersect_shape_by_line(topods_shape, line, low_parameter=0.0, hi_parameter=float(\"+inf\")):\n from OCC.Core.IntCurvesFace import IntCurvesFace_ShapeIntersector\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(topods_shape, TOLERANCE)\n shape_inter.PerformNearest(line, low_parameter, hi_parameter)\n\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n return (shape_inter.Pnt(1),\n shape_inter.Face(1),\n shape_inter.UParameter(1),\n shape_inter.VParameter(1),\n shape_inter.WParameter(1))",
"def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result"
] |
[
"0.74953175",
"0.72366256",
"0.71736336",
"0.7018175",
"0.6921521",
"0.6889775",
"0.67568314",
"0.67477953",
"0.6587873",
"0.6492234",
"0.6457997",
"0.6406959",
"0.6392092",
"0.6383066",
"0.63744235",
"0.6335968",
"0.6276457",
"0.62747335",
"0.6262454",
"0.626192",
"0.6230714",
"0.62034535",
"0.6198294",
"0.61698824",
"0.6164639",
"0.61449766",
"0.61392343",
"0.6138859",
"0.6131062",
"0.6103579"
] |
0.72639793
|
1
|
record is a 2-element list [personA, personB]
|
def mapper(record):
personA = record[0]
personB = record[1]
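    # key each pair by personA so the framework groups all of personA's contacts together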
mr.emit_intermediate(personA, personB)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def RECORD(record_or_list, dates_as_iso=False, expand_refs=0):\n if isinstance(record_or_list, Record):\n return _prepare_record_dict(record_or_list, dates_as_iso=dates_as_iso, expand_refs=expand_refs)\n\n try:\n records = list(record_or_list)\n assert all(isinstance(r, Record) for r in records)\n except Exception:\n raise ValueError('RECORD() requires a Record or an iterable of Records')\n\n return [_prepare_record_dict(r, dates_as_iso=dates_as_iso, expand_refs=expand_refs)\n for r in records]",
"def data_comparison(observations, records, record):\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, f\"{records[record]}\"):\n if not re.search(\n observations[observation], f\"{records[record]}\"\n ):\n records[record] = (\n f\"{records[record]}\"\n + \" --> \"\n + observations[observation]\n )\n except Exception as ex:\n Common.logger.warning(f\"Exception happened in data comparison {ex}\")\n return records",
"def collection_creation(record_object):\n record = [rec_ob for rec_ob in record_object]\n return record",
"def record_updater(records, observations):\n for record in records:\n try:\n record = ast.literal_eval(records[record])\n except Exception:\n record = record\n try:\n if type(records[record]) is dict:\n records[record] = Common.record_updater(\n records[record], observations\n )\n elif type(records[record]) is list:\n list_records = []\n for list_record in records[record]:\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, f\"{list_record}\"):\n if not re.search(\n observations[observation],\n f\"{records[record]}\",\n ):\n if not re.search(\"-->\", f\"{list_record}\"):\n list_records.append(\n f\"{list_record}\"\n + \" --> \"\n + observations[observation]\n )\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n except Exception as ex:\n Common.logger.warning(\n f\"Exception happened in observation comparison {ex}\"\n )\n records[record] = list_records\n else:\n records = Common.data_comparison(observations, records, record)\n except Exception:\n records = Common.data_comparison(observations, records, record)\n return records",
"def select_data_from_record(self, record):\n x = record['input_ids']\n y = record['label_ids']\n return (x, y)",
"def label_record_pair(self, label, record_pair):\n\n if label == 'y':\n self.labeled_examples['match'].append(record_pair)\n elif label == 'n':\n self.labeled_examples['distinct'].append(record_pair)\n elif label == 'u':\n record_pair = ()\n elif label == 'f':\n print('Finished labeling')\n self.__create_uncertain_pairs_file()",
"def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)",
"def create_recordid_list(rec_ids):\n rec_list = []\n for row in rec_ids:\n rec_list.append(row[0])\n return rec_list",
"def get_record_meta(record_list):\n acc_code = record_list[0]\n organism = record_list[1]\n EC_code = record_list[2].replace(\"__\", \" \")\n species = record_list[3].replace(\"__\", \" \")\n note = record_list[4]\n return acc_code, organism, EC_code, species, note",
"def getRecordsByAttributePair(table, attribute1, attribute2, values):\n recordSets = queryByAttributePair(table, attribute1, attribute2, values)\n records = []\n for recordSet in recordSets:\n records.extend(list(recordSet))\n return records",
"def pair_records():\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n\r\n # check each study\r\n for study in study_list:\r\n\r\n df_meta = retrieve_meta(study)\r\n recordNames = list(df_meta['recordName'])\r\n\r\n # create column to list wearableName and coregister records\r\n df_meta = add_wearableName(df_meta)\r\n df_meta['coregisterRecords'] = recordNames\r\n\r\n # look for paired records using the unix time stamp for when the record begins\r\n for recordA in recordNames:\r\n\r\n i = df_meta[ df_meta['recordName']== recordA].index.values[0]\r\n recordBeginA = df_meta.loc[i, 'recordBegin' ]\r\n wearableA = df_meta.loc[i, 'wearableName' ]\r\n\r\n for recordB in recordNames:\r\n\r\n j = df_meta[ df_meta['recordName']== recordB].index.values[0]\r\n recordBeginB = df_meta.loc[j, 'recordBegin' ]\r\n wearableB = df_meta.loc[j, 'wearableName' ]\r\n\r\n if abs(recordBeginA - recordBeginB) < 300:\r\n\r\n if recordA != recordB:\r\n\r\n if wearableA != wearableB:\r\n\r\n print('coregister record found for ' + recordA + ' + ' + recordB)\r\n coregisterList = str(recordA + ' ' + recordB)\r\n df_meta.loc[i, 'coregisterRecords' ] = coregisterList\r\n\r\n save_meta(study, df_meta)",
"def geneSpecificRecord (self, orfList, headList, num):\n sequenceInfo = []\n for gene in orfList: # Finds target gene in each genome\n sequenceInfo.append(gene[num]) # ***any gene can be utilized***\n longestLength = max(len(s) for s in sequenceInfo) # gets longest seq to match length with gap characters\n paddedSequences = [s.ljust(longestLength, '-') for s in sequenceInfo] # Adds gap characters\n \n records = (SeqRecord(Seq(s), id = str(paddedSequences.index(s))) for s in paddedSequences) #creating a SeqRecord\n return(records)",
"def _appendRecord(self, rec, reclistname):\n if reclistname not in self.__dict__: # if not already an attrib\n self.__dict__[reclistname] = [] # init it\n self.__dict__[reclistname].append(rec) # append this record to its list",
"def record(records: list,\n method=\"\",\n method_uuid=\"\",\n indicator=\"\",\n indicator_uuid=\"\",\n indicator_unit=\"\",\n flow=\"\",\n flow_uuid=\"\",\n flow_category=\"\",\n flow_unit=\"\",\n cas_number=\"\",\n location=\"\",\n location_uuid=\"\",\n factor=0.0) -> list:\n records.append([\n method,\n method_uuid,\n indicator,\n indicator_uuid,\n indicator_unit,\n flow,\n flow_uuid,\n flow_category,\n flow_unit,\n cas_number,\n location,\n location_uuid,\n factor])\n return records",
"def mapper1_extract_user_business(self,_,record):\n yield [record['user_id'], record['business_id']]",
"def list_people():\n\n person_list = []\n for person in person_database:\n person_list.append(person)\n return person_list",
"def add_record(self, record):\n pass",
"def parse_record(self, record):\n raise NotImplementedError()",
"def handle_record_sequence(self, record_sequence):\n result = []\n for record in record_sequence:\n result.append(self.handle_record(record))\n return result",
"def part2(records):\n _, root = Node.build(records)\n return root.value()",
"def _add_parsed_record(record, records):\n if record != \"\":\n logger.debug('The record is not empty. Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')",
"def extract_A_records(self, records):\n a_records = []\n for record in records:\n if record[\"type\"] == \"A\":\n a_records.append(record)\n\n #Check the results of Arecords\n if len(a_records) > 1:\n raise Exception() # \"A\" record should be a one.\n elif a_records == []: # No \"A\" records\n return None\n else:\n return a_records[0]",
"def parse(records):\n # Collect all people.\n people = {}\n for record in records:\n if record.rec_type == \"INDI\":\n assert record.rec_id not in people\n people[record.rec_id] = Person(record)\n # Link parents and children.\n for record in records:\n if record.rec_type == \"FAM\":\n # For this \"family unit\" collect all parents and all children.\n parents = []\n children = []\n for sub_rec in record.sub_recs:\n if sub_rec.rec_type in (\"HUSB\", \"WIFE\"):\n parents.append(sub_rec.data)\n elif sub_rec.rec_type == \"CHIL\":\n children.append(sub_rec.data)\n # Ignore MARR, DATE, PLAC, ...\n # Add parent/child relationships.\n for child_id in children:\n child = people[child_id]\n for parent_id in parents:\n parent = people[parent_id]\n child.parents.append(parent)\n parent.children.append(child)\n return people",
"def emit(self, record):\n if self.list is not None:\n try:\n self.r.lpush(self.list, json.dumps(self.format(record)))\n except Exception:\n self.handleError(record)",
"def get_orcids_for_push(record):\n orcids_on_record = []\n author_recids_with_claims = []\n for author in record.get(\"authors\", []):\n orcids_in_author = get_values_for_schema(author.get(\"ids\", []), \"ORCID\")\n if orcids_in_author:\n orcids_on_record.extend(orcids_in_author)\n if author.get(\"curated_relation\") is True and \"record\" in author:\n author_recids_with_claims.append(get_recid_from_ref(author[\"record\"]))\n author_records = AuthorsRecord.get_records_by_pids(\n (\"aut\", str(recid)) for recid in author_recids_with_claims\n )\n\n all_ids = (author.get(\"ids\", []) for author in author_records)\n orcids_in_authors = chain.from_iterable(\n get_values_for_schema(ids, \"ORCID\") for ids in all_ids\n )\n\n return chain(orcids_on_record, orcids_in_authors)",
"def dump_record(record):\n rec = E.record()\n\n leader = record.get('leader')\n if leader:\n rec.append(E.leader(leader))\n\n if isinstance(record, GroupableOrderedDict):\n items = record.iteritems(with_order=False, repeated=True)\n else:\n items = iteritems(record)\n\n for df, subfields in items:\n # Control fields\n if len(df) == 3:\n if isinstance(subfields, string_types):\n controlfield = E.controlfield(subfields)\n controlfield.attrib['tag'] = df[0:3]\n rec.append(controlfield)\n elif isinstance(subfields, (list, tuple, set)):\n for subfield in subfields:\n controlfield = E.controlfield(subfield)\n controlfield.attrib['tag'] = df[0:3]\n rec.append(controlfield)\n else:\n # Skip leader.\n if df == 'leader':\n continue\n\n if not isinstance(subfields, (list, tuple, set)):\n subfields = (subfields,)\n\n df = df.replace('_', ' ')\n for subfield in subfields:\n if not isinstance(subfield, (list, tuple, set)):\n subfield = [subfield]\n\n for s in subfield:\n datafield = E.datafield()\n datafield.attrib['tag'] = df[0:3]\n datafield.attrib['ind1'] = df[3]\n datafield.attrib['ind2'] = df[4]\n\n if isinstance(s, GroupableOrderedDict):\n items = s.iteritems(with_order=False, repeated=True)\n elif isinstance(s, dict):\n items = iteritems(s)\n else:\n datafield.append(E.subfield(s))\n\n items = tuple()\n\n for code, value in items:\n if not isinstance(value, string_types):\n for v in value:\n datafield.append(E.subfield(v, code=code))\n else:\n datafield.append(E.subfield(value, code=code))\n\n rec.append(datafield)\n return rec",
"def emit_marker(record):\n logging.debug(\"Formatting individual record {}\".format(record))\n global individual_markers \n marker = record.copy()\n # logging.debug(\"Emitting individual marker: {}\".format(marker))\n individual_markers.append(marker)",
"def add(self, record):\n if record.name != 'consensus':\n self.members.append(record)",
"def select_data_from_record(self, record):\n x, y = {}, {}\n for name, tensor in record.items():\n if name in ('start_positions', 'end_positions'):\n y[name] = tensor\n elif name == 'input_ids':\n x['input_word_ids'] = tensor\n elif name == 'segment_ids':\n x['input_type_ids'] = tensor\n else:\n x[name] = tensor\n return (x, y)",
"def triage_record(self, record):\n # Filter out any garbage commands/responses\n # Record any changes to the reader state\n # Pass OBD2 records on for formatting\n \n # We need to figure out whether this record is :\n # - line noise / garbage \"?\"\n # - the result of an \"AT\" command \n # - the result of an OBD2 command \n \n # skip over garbage \n if record == [] \\\n or record[0] == [] \\\n or record[0][0] == '' \\\n or record[0][0] == '?' :\n #print \"Garbage record. Skipping.\"\n return []\n\n # handle ELM327 errors\n # \"?\" - unrecognized command\n # \"NO DATA\" - reader timed out waiting for response from vehicle\n # \"BUFFER FULL\" - need to read data from reader faster, ie. increase baud rate on serial connection\n # many more...\n if len(record) > 1 :\n if record[1][0] == '?' \\\n or record[1][0] == 'NO':\n #print \"Garbage record. Skipping.\"\n return []\n\n # record the changes made by AT commands\n cmd = str.upper(record[0][0])\n if cmd[0:2] == 'AT':\n self.interpret_at_cmd(record)\n return []\n \n # remove \"SEARCHING...\" from response\n # example:\n # >0100\n # SEARCHING...\n # 41 00 BE 3E A8 11 \n if len(record) > 1 :\n if record[1][0] == 'SEARCHING...':\n record.pop(1)\n\n # BUFFER FULL - ugh, need to speed up the serial connection\n rl = len(record)\n rec = 0\n while rec < rl:\n if record[rec][0] == 'BUFFER' and record[rec][1] == 'FULL':\n record.pop(rec)\n print \" ERROR - BUFFER FULL - Increase speed of serial connection\"\n #return []\n rec += 1\n # \"BUS BUSY\", \"CAN ERROR\", ???\n\n # if we get a 7F, that means there was an error\n # 10 - general reject\n # 11 - service not supported\n # 12 - subfunction not supported OR invalid format\n # 21 - busy repeat\n # 22 - conditions or sequence not correct \n # 78 - response pending\n if record[1][0] == '7F':\n mode = record[1][1]\n err = record[1][2]\n if err == 10:\n print \"General Error -- Mode:\", mode\n elif err == 11:\n print \"Service Not Supported Error -- Mode:\", mode\n elif err == 12:\n print \"Subfunction Not Supported or Invalid Format Error -- Mode:\", mode\n elif err == 21:\n print \"BUSY, Repeat -- Mode:\", mode\n elif err == 22:\n print \"Conditions or Sequence Not Correct -- Mode:\", mode\n elif err == 78:\n print \"Unknown Error -- Mode:\", mode, \" -- Error code:\", err\n return []\n\n\n # format an OBD 2 command for further processing at a higher layer\n try:\n obd2_record = self.format_obd2_record(record)\n except self.ErrorIncompleteRecord:\n print \"Garbage record. Skipping.\"\n return []\n \n return obd2_record"
] |
[
"0.6047177",
"0.59028745",
"0.57715553",
"0.5690277",
"0.56728095",
"0.5669321",
"0.558235",
"0.5566881",
"0.55136764",
"0.54689837",
"0.54680896",
"0.5428929",
"0.54203427",
"0.5380737",
"0.53656065",
"0.5343676",
"0.53307116",
"0.5328262",
"0.53270775",
"0.53258866",
"0.531925",
"0.52762926",
"0.5274942",
"0.52729535",
"0.5255009",
"0.5219621",
"0.5193707",
"0.5168911",
"0.5152981",
"0.5142402"
] |
0.62028843
|
0
|
Raises a ValueError if the provided Message is not a FHIR reference.
|
def _validate_reference(reference: message.Message) -> None:
if not annotation_utils.is_reference(reference):
raise ValueError(
f'Message {reference.DESCRIPTOR.name} is not a FHIR reference.')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, message, text=None, reference=None, contact=None):\n self.openid_message = message\n self.reference = reference\n self.contact = contact\n assert type(message) not in [str, str]\n Exception.__init__(self, text)",
"def test_invalid_ref_in_property(self):\n with self.assertRaises(reschema.exceptions.InvalidReference):\n schema = list(self.sd.resources.values())[0].properties['name']\n RefSchemaProxy(schema, None)",
"def message_error_validator():\n\n return validator.MessageErrorSchema()",
"def value_from_message(self, message):\n if not isinstance(message, self.message_type):\n raise DecodeError('Expected type %s, got %s: %r' %\n (self.message_type.__name__,\n type(message).__name__,\n message))\n return message",
"def check_refferal(self, data):\n\n match = _MATCH_URL_RE.match(data)\n if match:\n raise ImapReferralsException(match.group('url'),\n match.group('message'))",
"def testMessageField_ForwardReference(self):\n global MyMessage\n global ForwardMessage\n try:\n class MyMessage(messages.Message):\n\n self_reference = messages.MessageField('MyMessage', 1)\n forward = messages.MessageField('ForwardMessage', 2)\n nested = messages.MessageField(\n 'ForwardMessage.NestedMessage', 3)\n inner = messages.MessageField('Inner', 4)\n\n class Inner(messages.Message):\n\n sibling = messages.MessageField('Sibling', 1)\n\n class Sibling(messages.Message):\n\n pass\n\n class ForwardMessage(messages.Message):\n\n class NestedMessage(messages.Message):\n\n pass\n\n self.assertEquals(MyMessage,\n MyMessage.field_by_name('self_reference').type)\n\n self.assertEquals(ForwardMessage,\n MyMessage.field_by_name('forward').type)\n\n self.assertEquals(ForwardMessage.NestedMessage,\n MyMessage.field_by_name('nested').type)\n\n self.assertEquals(MyMessage.Inner,\n MyMessage.field_by_name('inner').type)\n\n self.assertEquals(MyMessage.Sibling,\n MyMessage.Inner.field_by_name('sibling').type)\n finally:\n try:\n del MyMessage\n del ForwardMessage\n except: # pylint:disable=bare-except\n pass",
"def validate_message(self, state_id, msg):\n pass",
"def bad_request(self, message=None):\n return self.send_message(message, status=400)",
"def msg(self):\r\n if self._uris:\r\n raise AssemblerError('Message still contains missing references.')\r\n\r\n return self._msg",
"def test_type_accepted_reference_validreff(self):\n with self.assertRaises(TypeError):\n self.TEI.getValidReff(reference=[\"1\", \"pr\", \"2\", \"5\"])",
"def _check_reference(self, val) :\n\t\tdef char_check(s, not_allowed = ['#','[',']']) :\n\t\t\tfor c in not_allowed :\n\t\t\t\tif s.find(c) != -1 : return False\n\t\t\treturn True\n\t\t# Creating an artificial http URI to fool the urlparse module...\n\t\tscheme, netloc, url, query, fragment = urlsplit('http:' + val)\n\t\tif netloc != \"\" and self.state.rdfa_version >= \"1.1\" :\n\t\t\tself.state.options.add_warning(err_absolute_reference % (netloc, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\treturn False\n\t\telif not char_check(query) :\n\t\t\tself.state.options.add_warning(err_query_reference % (query, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\treturn False\n\t\telif not char_check(fragment) :\n\t\t\tself.state.options.add_warning(err_fragment_reference % (fragment, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\treturn False\n\t\telse :\n\t\t\treturn True",
"def _validate_senders_reference_20(self, val):\n validate_slash_and_double_slash(val, \"Senders Reference\") # .value()\n return val",
"def check_resolved(self):\n if self._definition is None:\n msg = message_factory.get_message(\n 'vapi.data.structref.not.resolved',\n self.name)\n logger.debug(msg)\n raise CoreException(msg)",
"def testMessageField(self):\n self.assertRaises(messages.FieldDefinitionError,\n messages.MessageField,\n str,\n 10)\n\n self.assertRaises(messages.FieldDefinitionError,\n messages.MessageField,\n messages.Message,\n 10)\n\n class MyMessage(messages.Message):\n pass\n\n field = messages.MessageField(MyMessage, 10)\n self.assertEquals(MyMessage, field.type)",
"def test_invalid_ref_in_links(self):\n with self.assertRaises(reschema.exceptions.InvalidReference):\n resource = list(self.sd.resources.values())[0]\n title = \"%s v%s %s\" % (self.sd.title, self.sd.version,\n self.sd.status)\n htmldoc = reschema.html.Document(title, printable=False)\n r2h = ResourceToHtml(resource, htmldoc.content,\n htmldoc.menu.add_submenu(),\n \"http://{device}/{root}\",\n None)\n baseid = html_str_to_id(r2h.schema.fullid(True))\n div = r2h.container.div(id=baseid)\n r2h.menu.add_item(r2h.schema.name, href=div)\n r2h.process_links(div, baseid)",
"def error(self, message):\n raise ArgumentParseError(message)",
"def check_message(self, msg):\n pass",
"def _ReferenceFromSerialized(serialized):\n if not isinstance(serialized, basestring):\n raise TypeError('serialized must be a string; received %r' % serialized)\n elif isinstance(serialized, unicode):\n serialized = serialized.encode('utf8')\n return entity_pb.Reference(serialized)",
"def testWrongTypeAssignment(self):\n self.assertRaises(messages.ValidationError,\n protojson.decode_message,\n MyMessage, '{\"a_string\": 10}')",
"def error(self, message):\r\n self._construct_partial_parser().error(message)",
"def invalid_request_content(message):\n exception_tuple = LambdaErrorResponses.InvalidRequestContentException\n\n return BaseLocalService.service_response(\n LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.USER_ERROR, message),\n LambdaErrorResponses._construct_headers(exception_tuple[0]),\n exception_tuple[1],\n )",
"def valid_entity_ref(s):\n try:\n parse_entity_ref(s, lambda x: x)\n return s\n except Exception as e:\n raise argparse.ArgumentTypeError(str(e))",
"def handle_message(self, validated_message: dict):\n pass",
"def validate_ref_type(dictionary, yaml_file):\n\n if not _valid_ref_type(dictionary['ref']):\n raise ClowderYAMLError(fmt.invalid_ref_error(dictionary['ref'], yaml_file))",
"def testAbsoluteReference(self):\n # Define modules.\n a = self.DefineModule('a')\n b = self.DefineModule('a.a')\n\n # Define messages.\n aA = self.DefineMessage('a', 'A')\n aaA = self.DefineMessage('a.a', 'A')\n\n # Always find a.A.\n self.assertEquals(aA, messages.find_definition('.a.A', None,\n importer=self.Importer))\n self.assertEquals(aA, messages.find_definition('.a.A', a,\n importer=self.Importer))\n self.assertEquals(aA, messages.find_definition('.a.A', aA,\n importer=self.Importer))\n self.assertEquals(aA, messages.find_definition('.a.A', aaA,\n importer=self.Importer))",
"def testMessageField_WrongType(self):\n global AnEnum\n try:\n class AnEnum(messages.Enum):\n pass\n\n class AnotherMessage(messages.Message):\n\n a_field = messages.MessageField('AnEnum', 1)\n\n self.assertRaises(messages.FieldDefinitionError,\n getattr,\n AnotherMessage.field_by_name('a_field'),\n 'type')\n finally:\n del AnEnum",
"def testEnumField_WrongType(self):\n global AMessage\n try:\n class AMessage(messages.Message):\n pass\n\n class AnotherMessage(messages.Message):\n\n a_field = messages.EnumField('AMessage', 1)\n\n self.assertRaises(messages.FieldDefinitionError,\n getattr,\n AnotherMessage.field_by_name('a_field'),\n 'type')\n finally:\n del AMessage",
"def testMessageFieldValidate(self):\n class MyMessage(messages.Message):\n pass\n\n class AnotherMessage(messages.Message):\n pass\n\n field = messages.MessageField(MyMessage, 10)\n field.validate(MyMessage())\n\n self.assertRaises(messages.ValidationError,\n field.validate,\n AnotherMessage())",
"def parse_message(self, message):\n pass",
"def BadRequest(message):\n return f\"Bad Request: {message}\", 400"
] |
[
"0.53380877",
"0.53224725",
"0.52386135",
"0.51981986",
"0.5152795",
"0.5132949",
"0.5045758",
"0.49936837",
"0.49792278",
"0.4978647",
"0.49724352",
"0.49381897",
"0.48967144",
"0.48853883",
"0.487848",
"0.48769715",
"0.48400518",
"0.482421",
"0.48176172",
"0.4797601",
"0.47966677",
"0.47721648",
"0.4755015",
"0.4753095",
"0.47490597",
"0.47401398",
"0.4739486",
"0.4737984",
"0.47352794",
"0.47169983"
] |
0.7516594
|
0
|
Returns the reference ID field for a provided resource type.
|
def get_reference_id_field_for_resource(
reference: message.Message,
resource_type: str) -> descriptor.FieldDescriptor:
_validate_reference(reference)
field_name = path_utils.camel_case_to_snake_case(resource_type) + '_id'
field = reference.DESCRIPTOR.fields_by_name.get(field_name)
if field is None:
raise ValueError(f'Resource type {resource_type!r} is not valid for a '
f'reference. Field {field_name!r} does not exist.')
return field
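# Hedged usage sketch: `reference_pb2.Reference` and its `patient_id` field are
# assumptions based on the snake_case + '_id' convention above, not a verified API.
#
#   ref = reference_pb2.Reference()
#   field = get_reference_id_field_for_resource(ref, 'Patient')
#   # field.name == 'patient_id'; an unknown resource type raises ValueError.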
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_id(type_: Dict[str, str]) -> int:\n return int(type_[f'{type_name}_id'])",
"def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")",
"def getTypeID(self) -> int:\n ...",
"def get_id_from_ref(ref):\n ref_id = None\n if ref is not None and len(ref) > 0:\n ref_id = path.split(ref)[1]\n return ref_id",
"def resourceDocumentId(self, resource: Resource) -> str:",
"def get_id(self, refobj):\n return cmds.getAttr(\"%s.identifier\" % refobj)",
"def get_identifier(self, object):\n try:\n identifier = object[\"uri\"]\n except KeyError:\n identifier = object[\"ref\"]\n return identifier",
"def reference_id(self) -> Optional[str]:\n return pulumi.get(self, \"reference_id\")",
"def get_id(request, request_type):\n if request_type == \"post\":\n id = request.POST['id']\n else:\n id = request.GET['id']\n id = id.rsplit('_')\n id = int(id[1])\n return id",
"def get_type_id(type_url):\n # TODO\n return type_url",
"def get_record_type_id(self, obj_type, developer_name):\n soql = \"SELECT Id FROM RecordType WHERE SObjectType='{}' and DeveloperName='{}'\".format(\n obj_type, developer_name\n )\n res = self.cumulusci.sf.query_all(soql)\n return res[\"records\"][0][\"Id\"]",
"def get_object_id(resource):\n if hasattr(resource, \"object_id\"):\n return int(resource.object_id)\n\n return int(resource.id)",
"def paypal_reference_id_type_enum(self) -> ReferenceIdType:\n return _REFERENCE_ID_MAPPINGS.get(self.paypal_reference_id_type)",
"def field_ref(self) -> str:\n return pulumi.get(self, \"field_ref\")",
"def resourceid(self):",
"def get_resource_id(self, obj):\n return obj.id",
"def getIdRef(self):\n return _libsbml.SBaseRef_getIdRef(self)",
"def get_identifier(self, identifier_type):\n if identifier_type == 'ID':\n retval = self._identity\n elif identifier_type == 'Title':\n retval = self._title\n else:\n raise ValueError('identifier_type is neither \\'ID\\' nor \\'Title\\'')\n return retval",
"def type_id(self):\n return self._type_id",
"def __get_type_id(record: TNSRecord) -> int:\n return ObjectType.get_or_create(record.type or 'Unknown').id",
"def id(self):\n return self.raw_resource[\"id\"]",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")"
] |
[
"0.7214203",
"0.6473589",
"0.64262575",
"0.6391519",
"0.6350565",
"0.6340821",
"0.63336617",
"0.6285277",
"0.6248177",
"0.622544",
"0.6193647",
"0.61639434",
"0.61486083",
"0.6090768",
"0.60696644",
"0.6055495",
"0.602719",
"0.60268337",
"0.60031354",
"0.5948144",
"0.5941824",
"0.5930001",
"0.5930001",
"0.5930001",
"0.5930001",
"0.5930001",
"0.5930001",
"0.5930001",
"0.5930001",
"0.5930001"
] |
0.8575352
|
0
|
Gets the "base" mode for given mode. This function returns "L" for images that contain grayscale data, and "RGB" for images that contain color data.
|
def getmodebase(mode):
return ImageMode().getmode(mode).basemode
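# Illustrative calls (values follow the docstring above; treat as a sketch):
#   getmodebase("RGBA")  # -> "RGB"  (color data)
#   getmodebase("LA")    # -> "L"    (grayscale data)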
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getmodetype(mode):\r\n return ImageMode().getmode(mode).basetype",
"def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]",
"def get_mode(self, image, preview=False):\n if preview:\n self.mode = 'preview'\n elif len(image.shape) == 3:\n self.mode = 'train'\n else:\n self.mode = 'binary'",
"def mode(self) -> int:",
"def _get_mode(self):\n raise NotImplementedError",
"def mode(self):\n return self._data.get('mode', None)",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def get_mode(self):\r\n return self.mode",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def getmode(self):\n return self.mode",
"def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...",
"def mode(self) -> Optional[str]:\n for mode in self._modes:\n if mode.active:\n return mode.name\n return None",
"def getmodebands(mode):\r\n return len(ImageMode().getmode(mode).bands)",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\n if \"mode\" in self.recipe:\n return self.recipe[\"mode\"]\n else:\n raise ValueError(\"No mode defined for recipe {}!\".format(self))",
"def mode(self) -> int:\n return self._mode",
"def get_mode(self):\r\n return self._api.get_mode()",
"def mode(self):\n return self.__mode",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def _get_mode():\n return context.get_context('mode')",
"def mode(self) -> str:\r\n return self._mode",
"def mode(self) -> Union[int, float, str,\n List[int], List[float], List[str]]:\n mode = self._data.mode()\n if len(mode) > 1:\n return mode.to_list()\n else:\n return mode[0]",
"def get_mode(x):\n mode, count = Counter(x).most_common(1)[0]\n return mode",
"def mode(self):\n return self._mode_func"
] |
[
"0.74193406",
"0.6618623",
"0.63047886",
"0.6293309",
"0.6206783",
"0.6195586",
"0.61842334",
"0.6122007",
"0.6116381",
"0.61054254",
"0.61054254",
"0.6093441",
"0.6092703",
"0.6055182",
"0.6052117",
"0.60268027",
"0.60268027",
"0.60268027",
"0.60228217",
"0.6016279",
"0.5993168",
"0.599308",
"0.59887886",
"0.59887886",
"0.59887886",
"0.5988252",
"0.59879005",
"0.5973144",
"0.5967708",
"0.59560025"
] |
0.8632918
|
0
|
Gets the storage type mode. Given a mode, this function returns a single-layer mode suitable for storing individual bands.
|
def getmodetype(mode):
return ImageMode().getmode(mode).basetype
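# Illustrative calls (a sketch based on PIL's standard mode table):
#   getmodetype("RGB")  # -> "L"  (each band is stored as an 8-bit "L" layer)
#   getmodetype("F")    # -> "F"  (32-bit floating point storage)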
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mode(self):\n return self._data.get('mode', None)",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_mode(self):\r\n return self.mode",
"def getmode(self):\n return self.mode",
"def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]",
"def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def getMode(self):\n return self._mode",
"def mode(self):\n\n return self._mode",
"def mode(self) -> Mode:\n return self._mode",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'FIX': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def mode(self) -> Union[int, float, str,\n List[int], List[float], List[str]]:\n mode = self._data.mode()\n if len(mode) > 1:\n return mode.to_list()\n else:\n return mode[0]",
"def mode(self):\n return self._lift(\"mode\")",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\n return self.__mode",
"def _get_mode(self):\n raise NotImplementedError",
"def mode(self) -> Optional[pulumi.Input[Union[str, 'Mode']]]:\n return pulumi.get(self, \"mode\")",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)"
] |
[
"0.72878486",
"0.70554715",
"0.70130104",
"0.6983565",
"0.6965334",
"0.69422174",
"0.69383216",
"0.6925486",
"0.6925486",
"0.6924492",
"0.6924492",
"0.6924492",
"0.6917303",
"0.6915846",
"0.6915617",
"0.6906055",
"0.6906055",
"0.6905283",
"0.6902822",
"0.6902822",
"0.6900335",
"0.6890108",
"0.6871674",
"0.68499243",
"0.68499243",
"0.68499243",
"0.68271935",
"0.68039244",
"0.67605925",
"0.673794"
] |
0.73119026
|
0
|
Gets a list of individual band names. Given a mode, this function returns a tuple containing the names of individual bands (use getmodetype() to get the mode used to store each individual band).
|
def getmodebandnames(mode):
return ImageMode().getmode(mode).bands
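# Illustrative calls (a sketch):
#   getmodebandnames("RGB")   # -> ("R", "G", "B")
#   getmodebandnames("RGBA")  # -> ("R", "G", "B", "A")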
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getmodebands(mode):\r\n return len(ImageMode().getmode(mode).bands)",
"def bands(self):\n\t\treturn self._bands",
"def bands(self):\n return self._bands",
"def get_worker_bands(self) -> List[BandType]:",
"def bandname(self):\n return self._properties[\"bandname\"]",
"def getBandnames(self,cx,cy):\n bandnames = []\n\n for bandname in self.cells:\n if [cx,cy] in self.cells[bandname]:\n bandnames.append(bandname)\n\n return bandnames",
"def usedbands(self):\n lc = self.lc\n return np.unique(np.asarray(lc['band']))",
"def get_output_bands(self):\n dlist=self.dest_list.children()\n out_list=[]\n for item in dlist:\n out_list.append((self.output_bands[item][0],\n self.output_bands[item][1]))\n return out_list",
"def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")",
"def get_singers_and_songs_by_mode(mode: int) -> tuple:\n if mode == TAIWANESE_MODE:\n return taiwanese_singers, taiwanese_songs\n pass\n elif mode == CHINESE_MODE:\n return chinese_singers, chinese_songs\n pass\n elif mode == CHINESE_POPULAR_MODE:\n return chinese_popular_singers, chinese_popular_songs\n pass\n else:\n raise Exception(\"invalid mode\")\n pass",
"def bandname(self):\n if self._properties['bandname'] is None:\n self._properties['bandname'] = \"fuv\" if \"-fd-\" in self.filename else \"nuv\" if \"-nd-\" in self.filename \\\n else \"unknown\"\n return self._properties['bandname']",
"def bandname(self):\n if self._properties['bandname'] is None:\n self._properties['bandname'] = \"wisew1\" if \"-w1-\" in self.filename \\\n else \"wisew2\" if \"-w2-\" in self.filename \\\n else \"wisew3\" if \"-w3-\" in self.filename \\\n else \"wisew4\" if \"-w4-\" in self.filename \\\n else \"unknown\"\n return self._properties['bandname']",
"def bands(self):\n if self._bands is None:\n self._bands = self._compute_bands()\n return self._bands",
"def bandlcs(self):\n lc = self.lc\n bandgroups = lc.group_by('band')\n return bandgroups",
"def read_bands(bands):\n bands = map(str.split, open(bands).readlines())\n bands = [\"-\".join(b) for b in bands]\n return bands",
"def _lookup_bands(platform, wavelengths):\r\n wave_bands = {\r\n Platform.Landsat5: {\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"nir\": \"4\",\r\n \"swir1\": \"5\",\r\n \"tirs\": \"6\",\r\n \"swir2\": \"7\",\r\n },\r\n Platform.Landsat7: {\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"nir\": \"4\",\r\n \"swir1\": \"5\",\r\n \"tirs1\": \"6_VCID_1\",\r\n \"tirs2\": \"6_VCID_2\",\r\n \"swir2\": \"7\",\r\n \"pan\": \"8\",\r\n },\r\n Platform.Landsat8: {\r\n \"aerosol\": \"1\",\r\n \"blue\": \"2\",\r\n \"green\": \"3\",\r\n \"red\": \"4\",\r\n \"nir\": \"5\",\r\n \"swir1\": \"6\",\r\n \"swir2\": \"7\",\r\n \"pan\": \"8\",\r\n \"cirrus\": \"9\",\r\n \"tirs1\": \"10\",\r\n \"tirs2\": \"11\",\r\n },\r\n Platform.Sentinel2: {\r\n \"aerosol\": \"0\",\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"rededge1\": \"4\",\r\n \"rededge2\": \"5\",\r\n \"rededge3\": \"6\",\r\n \"nir\": \"7\",\r\n \"rededge4\": \"8\",\r\n \"watervapor\": \"9\",\r\n \"cirrus\": \"10\",\r\n \"swir1\": \"11\",\r\n \"swir2\": \"12\",\r\n },\r\n }\r\n\r\n return [wave_bands[platform][wavelength.lower()] for wavelength in wavelengths]",
"def get_band_filenames(xmldoc):\n band_dict = {}\n bands = xmldoc.find('.//bands')\n for bandxml in bands:\n band_name = (bandxml.get('name'))\n file = bandxml.find('.//file_name')\n band_file_name = file.text\n band_dict[band_name] = band_file_name\n return (band_dict)",
"def getSpectralTypes():\n return ['B','A','F','G','K','M']",
"def getbandlcs(lc):\n bandgroups = lc.group_by('band')\n\n return bandgroups",
"def coords(mode: str = 'illuminated', band: str = '78') -> tuple[slice, slice]:\n if mode == 'level2':\n if band == '7':\n return np.s_[12:227, 20:500]\n if band == '8':\n return np.s_[12:227, :480]\n # else\n return np.s_[12:227, 20:980]\n\n if band == '7':\n return np.s_[11:228, 16:500]\n if band == '8':\n return np.s_[11:228, :491]\n # else\n return np.s_[11:228, 16:991]",
"def __str__(self):\n return str([str(bandit) for bandit in self.bandits])",
"def get_list_of_bands_as_dict(size=None):\n\n layer = gview.app.sel_manager.get_active_layer()\n if layer is None:\n\treturn {}\n if size is None:\n size = get_raster_size(layer)\n dict = {}\n for curview in gview.app.view_manager.view_list:\n for curlayer in curview.viewarea.list_layers():\n \t curname = curlayer.get_name()\n cursize = get_raster_size(curlayer)\n\t if cursize == size:\n\t\tnum_bands = curlayer.get_parent().get_dataset().RasterCount\n ds=curlayer.get_parent().get_dataset()\n\t\tfor i in range(1,num_bands+1):\n\t \t curband = curname + '.band['+ str(i) + ']'\n\t\t dict[gtk.ListItem(curband)] = (ds,i,curband)\n if dict is None:\n\treturn None\n return dict",
"def bands(self) -> int:\n ...",
"def build_band_urls(scene, bands):\n return [band_url(scene, band) for band in bands]",
"def getSpinors(self,mode='full'):\n if mode=='full':\n return self.__allSpinors\n else:\n return self.__allSpinors[modeDict[mode][1]]",
"def bands(self):\n\t\treturn zip((self.primary_threshold, self.upper_earning_limit),\n\t\t\t\t self.rates)",
"def _detect_available_bands(self):\n return (\n [col.rpartition('_')[0] for col in self._columns if col.endswith('_FLUXMAG0')] or\n [col.partition('_')[2] for col in self._columns if col.startswith('psFlux_')]\n )",
"def get_vrt_band_list():\n logger.debug('get_vrt_band_list() called')\n vrt_band_list = []\n#===============================================================================\n# sensor_dict = self.bands[tile_type_id][(dataset_info['satellite_tag'], dataset_info['sensor_name'])]\n# # log_multiline(logger.debug, sensor, 'Sensor', '\\t')\n# for file_number in sorted(sensor_dict.keys()):\n# band_info = sensor_dict[file_number]\n# if band_info['level_name'] == 'NBAR':\n# dataset_dir = dataset_info['nbar_dataset_path']\n# dataset_id = dataset_info['nbar_dataset_id']\n# processing_level = dataset_info['nbar_level_name']\n# nodata_value = dataset_info['nbar_nodata_value']\n# resampling_method = dataset_info['nbar_resampling_method']\n# elif band_info['level_name'] == 'ORTHO':\n# dataset_dir = dataset_info['l1t_dataset_path']\n# dataset_id = dataset_info['l1t_dataset_id']\n# processing_level = dataset_info['l1t_level_name']\n# nodata_value = dataset_info['l1t_nodata_value']\n# resampling_method = dataset_info['l1t_resampling_method']\n# else:\n# continue # Ignore any pan-chromatic and derived bands\n# \n# dataset_dir = os.path.join(dataset_dir, 'scene01')\n# filename = find_file(dataset_dir, band_info['file_pattern'])\n# vrt_band_list.append({'file_number': band_info['file_number'], \n# 'filename': filename, \n# 'name': band_info['band_name'],\n# 'dataset_id': dataset_id,\n# 'band_id': band_info['band_id'],\n# 'processing_level': processing_level,\n# 'nodata_value': nodata_value,\n# 'resampling_method': resampling_method,\n# 'tile_layer': band_info['tile_layer']})\n#===============================================================================\n \n #TODO: Make this able to handle multiple derived layers\n for band_level in ['FC']:\n derived_bands = self.bands[tile_type_id][('DERIVED', band_level)]\n for file_number in sorted(derived_bands.keys()):\n band_info = derived_bands[file_number]\n file_pattern = band_info['file_pattern']\n dataset_dir = os.path.join(dataset_info['fc_dataset_path'], 'scene01')\n dataset_id = dataset_info['fc_dataset_id']\n filename = find_file(dataset_dir, file_pattern) \n processing_level = dataset_info['fc_level_name']\n nodata_value = dataset_info['fc_nodata_value'] # Should be None for FC\n resampling_method = dataset_info['fc_resampling_method']\n vrt_band_list.append({'file_number': None, \n 'filename': filename, \n 'name': band_info['band_name'],\n 'dataset_id': dataset_id,\n 'band_id': band_info['band_id'],\n 'processing_level': processing_level,\n 'nodata_value': nodata_value,\n 'resampling_method': resampling_method,\n 'tile_layer': 1})\n \n log_multiline(logger.debug, vrt_band_list, 'vrt_band_list = %s', '\\t')\n return vrt_band_list",
"def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps",
"def modes(self) -> List[str]:\n return [m.name for m in self._modes]"
] |
[
"0.7001149",
"0.6439917",
"0.6196185",
"0.6136903",
"0.6134504",
"0.61022913",
"0.60811275",
"0.6024488",
"0.5933279",
"0.59185696",
"0.587115",
"0.5865979",
"0.58442855",
"0.5841001",
"0.5840017",
"0.57507384",
"0.5698309",
"0.5633168",
"0.5614396",
"0.55924386",
"0.5540013",
"0.55267924",
"0.54195684",
"0.54117197",
"0.5358563",
"0.53535",
"0.5326632",
"0.53162247",
"0.53022534",
"0.5260754"
] |
0.84709096
|
0
|
'In-place' analog of Image.alpha_composite. Composites an image onto this image.
|
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
if not isinstance(source, (list, tuple)):
raise ValueError("Source must be a tuple")
if not isinstance(dest, (list, tuple)):
raise ValueError("Destination must be a tuple")
if not len(source) in (2, 4):
raise ValueError("Source must be a 2 or 4-tuple")
if not len(dest) == 2:
raise ValueError("Destination must be a 2-tuple")
if min(source) < 0:
raise ValueError("Source must be non-negative")
if min(dest) < 0:
raise ValueError("Destination must be non-negative")
channels, depth = self._get_channels_and_depth(im)
_mode = self._get_mode(im.shape, im.dtype)
_im = self._new(_mode, (im.shape[1], im.shape[0]))
if len(source) == 2:
source = source + _im.size
# over image, crop if it's not the whole thing.
if source == (0, 0) + _im.size:
overlay = _im
else:
overlay = _im.crop(source)
# target for the paste
box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)
# destination image. don't copy if we're using the whole image.
if box == (0, 0) + self.size:
background = self._instance
else:
background = self.crop(box)
result = alpha_composite(background, overlay)
self.paste(result, box)
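# Hedged usage sketch: per the `im.shape`/`im.dtype` access above, `im` is expected
# to be an RGBA numpy array; `base` and `overlay_array` below are illustrative names.
#
#   base.alpha_composite(overlay_array)                         # composite at (0, 0)
#   base.alpha_composite(overlay_array, dest=(32, 32))          # offset the paste target
#   base.alpha_composite(overlay_array, source=(0, 0, 64, 64))  # composite only a crop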
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def alpha_composite(im1, im2):\r\n r1, g1, b1, a1 = Image().split(im1)\r\n r2, g2, b2, a2 = Image().split(im2)\r\n alphacomp = np.zeros(im1.shape, dtype=im1.dtype)\r\n im3 = composite(alphacomp, im1, a1)\r\n alphacomp = np.zeros(im2.shape, dtype=im2.dtype)\r\n im4 = composite(alphacomp, im2, a2)\r\n return blend(im3, im4, 0.5)",
"def alpha_composite_with_color(image, color=(255, 255, 255)):\n back = Image.new('RGBA', size=image.size, color=color + (255,))\n return alpha_composite(image, back)",
"def composite(self, image, left, top):\n library.MagickCompositeImage(self.wand, image.wand,\n COMPOSITE_OPS.index('over'), left, top)\n self.raise_exception()",
"def composite(args):\n\n # load the input image\n logging.info('Loading input image %s' % (args.input))\n inputImage = load_image(args.input)\n\n # load the target image\n logging.info('Loading target image %s' % (args.target))\n targetImage = load_image(args.target)\n\n # load the mask image\n logging.info('Loading mask image %s' % (args.mask))\n maskImage = load_image(args.mask)\n\n # If None, set the source points or sets them to the whole input image\n if args.source == None:\n (height, width, _) = inputImage.shape\n args.source = [0.0, height, 0.0, 0.0, width, 0.0, width, height]\n\n # Loads the source points into a 4-by-2 array\n source_points = np.array(args.source).reshape(4, 2)\n\n # Loads the target points into a 4-by-2 array\n target_points = np.array(args.dst).reshape(4, 2)\n\n # Compute the composite image\n result = composite_image(inputImage, targetImage,\n source_points, target_points, maskImage)\n result=np.uint8(result)\n # save the result\n logging.info('Saving result to %s' % (args.output))\n imageio.imwrite(args.output, result)",
"def putalpha(self, alpha):\r\n channels, depth = self._get_channels_and_depth(self._mode)\r\n\r\n if isinstance(alpha, np.ndarray): \r\n paste_image = True\r\n else:\r\n paste_image = False\r\n\r\n if channels==4:\r\n r, g, b, a = self.split()\r\n if not paste_image:\r\n a[:] = alpha\r\n else:\r\n a = alpha.copy()\r\n colorband = (r, g, b, a)\r\n self._instance = merge(\"RGBA\", colorband, image=True)\r\n elif channels == 3:\r\n if not paste_image:\r\n sh = self._instance.shape\r\n sh = (sh[0], sh[1], 1)\r\n a = np.zeros(sh, dtype=depth)\r\n a[:] = alpha\r\n else:\r\n a = alpha.copy()\r\n r, g, b = self.split()\r\n colorband = (r, g, b, a)\r\n self._instance = merge(\"RGBA\", colorband, image=True)\r\n elif channels < 2: # \"L\" or \"LA\"\r\n if not paste_image:\r\n sh = self._instance.shape\r\n sh = (sh[0], sh[1], 1)\r\n a = np.zeros(sh, dtype=depth)\r\n a[:] = alpha\r\n else:\r\n a = alpha.copy()\r\n if channels == 2:\r\n l, a_old = self.split()\r\n colorband = (l, a)\r\n else:\r\n colorband = (self._instance, a)\r\n self._instance = merge(\"LA\", colorband, image=True)",
"def alpha_composite(front, back):\n front = np.asarray(front)\n back = np.asarray(back)\n result = np.empty(front.shape, dtype='float')\n alpha = np.index_exp[:, :, 3:]\n rgb = np.index_exp[:, :, :3]\n falpha = front[alpha] / 255.0\n balpha = back[alpha] / 255.0\n result[alpha] = falpha + balpha * (1 - falpha)\n old_setting = np.seterr(invalid='ignore')\n result[rgb] = (front[rgb] * falpha + back[rgb] * balpha * (1 - falpha)) / result[alpha]\n np.seterr(**old_setting)\n result[alpha] *= 255\n np.clip(result, 0, 255)\n # astype('uint8') maps np.nan and np.inf to 0\n result = result.astype('uint8')\n result = Image.fromarray(result, 'RGBA')\n return result",
"def getComposite(images):\n composite = mapnik.Image(images[0].width(), images[0].height())\n for image in images:\n composite.blend(0, 0, image, 1.0)\n return composite",
"def createComposite(self):\n\n success = False\n msg = 'Placeholder'\n\n #########################################\n ## PLACE YOUR CODE BETWEEN THESE LINES ##\n #########################################\n if (self._images[\"backIn\"] is not None) and (self._images[\"colIn\"] is not None) and (\n self._images[\"alphaIn\"] is not None):\n self.useTriangulationResults()\n msg = \"Composite success\"\n back = self._images[\"backIn\"] / 255.0\n col = self._images[\"colIn\"] / 255.0\n alpha = self._images[\"alphaIn\"] / 255.0\n col_R = col[:, :, 2]\n col_G = col[:, :, 1]\n col_B = col[:, :, 0]\n back_R = back[:, :, 2]\n back_G = back[:, :, 1]\n back_B = back[:, :, 0]\n\n compOut = col + back - alpha * back\n success = True\n # m = self._images[\"colIn\"].shape[0]\n # n = self._images[\"colIn\"].shape[1]\n # img_out = self._images[\"colIn\"]\n # for i in range(m):\n # for j in range(n):\n # compOut_R = self._images[\"colIn\"][i][j][2] + \\\n # (1.0 - self._images[\"alphaIn\"][i, j] / 255.0) * self._images[\"backIn\"][i, j][2]\n # compOut_G = self._images[\"colIn\"][i][j][1] + \\\n # (1.0 - self._images[\"alphaIn\"][i, j] / 255.0) * self._images[\"backIn\"][i, j][1]\n # compOut_B = self._images[\"colIn\"][i][j][0] + \\\n # (1.0 - self._images[\"alphaIn\"][i, j] / 255.0) * self._images[\"backIn\"][i, j][0]\n #\n # img_out[i, j] = [compOut_B[0], compOut_G[0], compOut_R[0]]\n self._images[\"compOut\"] = compOut * 255.0\n # self._images[\"compOut\"] = cv.merge((out_B, out_G, out_R))\n #########################################\n\n return success, msg",
"def overlay_alpha_images(img1, img2, keepalpha=True, dtype=np.float32,\n impl='inplace'):\n rgb1, alpha1 = _prep_rgb_alpha(img1, dtype=dtype)\n rgb2, alpha2 = _prep_rgb_alpha(img2, dtype=dtype)\n\n # Perform the core alpha blending algorithm\n if impl == 'simple':\n rgb3, alpha3 = _alpha_blend_simple(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'inplace':\n rgb3, alpha3 = _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'numexpr1':\n rgb3, alpha3 = _alpha_blend_numexpr1(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'numexpr2':\n rgb3, alpha3 = _alpha_blend_numexpr2(rgb1, alpha1, rgb2, alpha2)\n else:\n raise ValueError('unknown impl={}'.format(impl))\n\n if keepalpha:\n raster = np.dstack([rgb3, alpha3[..., None]])\n # Note: if we want to output a 255 img we could do something like this\n # out = np.zeros_like(img1)\n # out[..., :3] = rgb3\n # out[..., 3] = alpha3\n else:\n raster = rgb3\n return raster",
"def _blend(img1, img2, alpha):\n return img1.mul(alpha).add(1 - alpha, img2)",
"def background_composite(\n image: np.ndarray | Image,\n fill: int = 255,\n *,\n alpha: bool,\n) -> np.ndarray:\n if not isinstance(image, Image.Image):\n image = Image.fromarray(image)\n\n image = image.convert(\"RGBA\")\n\n composite = Image.fromarray(\n np.full([*list(image.size[::-1]), 4], fill, dtype=np.uint8),\n )\n composite.alpha_composite(image)\n if not alpha:\n return np.asarray(composite.convert(\"RGB\"))\n\n return np.asarray(composite)",
"def merge_into(self, dst):\n # We must respect layer visibility, because saving a\n # transparent PNG just calls this function for each layer.\n src = self\n dst.strokes.extend(self.strokes)\n for tx, ty in dst._surface.get_tiles():\n surf = dst._surface.get_tile_memory(tx, ty, readonly=False)\n surf[:,:,:] = dst.effective_opacity * surf[:,:,:]\n for tx, ty in src._surface.get_tiles():\n surf = dst._surface.get_tile_memory(tx, ty, readonly=False)\n src._surface.composite_tile(surf, tx, ty,\n opacity=self.effective_opacity,\n mode=self.compositeop)\n dst.opacity = 1.0",
"def alpha_blend(input_image, segmentation_mask, alpha=0.5):\n blended = np.zeros(input_image.size, dtype=np.float32)\n blended = input_image * alpha + segmentation_mask * (1 - alpha)\n return blended",
"def imageconcat(self, *args, **kwargs):\n return _image.image_imageconcat(self, *args, **kwargs)",
"def set_blend_mode_over(self):\n self.image_item.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)",
"def watermark(self, image, transparency=0.0, left=0, top=0):\n with image.clone() as watermark_image:\n watermark_image.transparentize(transparency)\n self.composite(watermark_image, left, top)\n self.raise_exception()",
"def _blend_layers(self, imagecontent, (z, x, y)):\n result = self._tile_image(imagecontent)\n # Paste each layer\n for (layer, opacity) in self._layers:\n try:\n # Prepare tile of overlay, if available\n overlay = self._tile_image(layer.tile((z, x, y)))\n except (DownloadError, ExtractionError), e:\n logger.warn(e)\n continue\n # Extract alpha mask\n overlay = overlay.convert(\"RGBA\")\n r, g, b, a = overlay.split()\n overlay = Image.merge(\"RGB\", (r, g, b))\n a = ImageEnhance.Brightness(a).enhance(opacity)\n overlay.putalpha(a)\n mask = Image.merge(\"L\", (a,))\n result.paste(overlay, (0, 0), mask)\n # Read result\n return self._image_tile(result)",
"def compose(dst: np.ndarray, src: np.ndarray) -> np.ndarray:\n a, b = ensure_alpha(src), ensure_alpha(dst)\n alpha = extract_alpha(a)\n result = b * (1.0 - alpha) + a * alpha\n if dst.shape[2] == 3:\n return extract_rgb(result)\n return result",
"def _blend_layers(self, imagecontent, (z, x, y)):\n result = self._tile_image(imagecontent)\n # Paste each layer\n for (layer, opacity) in self._layers:\n try:\n # Prepare tile of overlay, if available\n overlay = self._tile_image(layer.tile((z, x, y)))\n except (DownloadError, ExtractionError), e:\n logger.warn(e)\n continue\n # Extract alpha mask\n overlay = overlay.convert(\"RGBA\")\n r, g, b, a = overlay.split()\n overlay = Image.merge(\"RGB\", (r, g, b))\n a = ImageEnhance.Brightness(a).enhance(opacity)\n overlay.putalpha(a)\n mask = Image.merge(\"L\", (a,))\n result.paste(overlay, (0, 0), mask)\n # Read result\n return self._image_tile(result)",
"def overlay_image_alpha(self,img, img_overlay, pos, alpha_mask):\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])",
"def compose_premultiplied(dst: np.ndarray, src: np.ndarray):\n a, b = ensure_alpha(src), ensure_alpha(dst)\n alpha = extract_alpha(a)\n result = b * (1.0 - alpha) + a\n if dst.shape[2] == 3:\n return extract_rgb(result)\n return result",
"def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img",
"def _build_final_image(self, image):\n if self._overlay_image:\n overlay = Image.open(self._overlay_image).convert('RGBA')\n overlay, _, _ = self._image_resize_keep_ratio(overlay, self.width, self.height, True)\n image = Image.alpha_composite(image.convert('RGBA'), overlay)\n image = image.convert('RGB')\n return image",
"def _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2):\n rgb3 = np.empty_like(rgb1)\n temp_rgb = np.empty_like(rgb1)\n alpha3 = np.empty_like(alpha1)\n temp_alpha = np.empty_like(alpha1)\n\n # hold (1 - alpha1)\n np.subtract(1, alpha1, out=temp_alpha)\n\n # alpha3\n np.copyto(dst=alpha3, src=temp_alpha)\n np.multiply(alpha2, alpha3, out=alpha3)\n np.add(alpha1, alpha3, out=alpha3)\n\n # numer1\n np.multiply(rgb1, alpha1[..., None], out=rgb3)\n\n # numer2\n np.multiply(alpha2, temp_alpha, out=temp_alpha)\n np.multiply(rgb2, temp_alpha[..., None], out=temp_rgb)\n\n # (numer1 + numer2)\n np.add(rgb3, temp_rgb, out=rgb3)\n\n # removing errstate is actually a significant speedup\n with np.errstate(invalid='ignore'):\n np.divide(rgb3, alpha3[..., None], out=rgb3)\n if not np.all(alpha3):\n rgb3[alpha3 == 0] = 0\n return rgb3, alpha3",
"def image_overlay(image, image_blend, alpha=0.2, cmap_image=\"Greys_r\", cmap_blend=\"jet\"):\n plt.imshow(image, cmap=cmap_image)\n plt.imshow(image_blend, cmap=cmap_blend, interpolation=\"none\", alpha=alpha)",
"def paste(self, src, x_off, y_off):\n x_off, y_off = int(x_off), int(y_off)\n\n # Overlap rectangle in target image coordinates\n width, height = src.width, src.height\n x1 = max(x_off, 0)\n y1 = max(y_off, 0)\n x2 = min(x_off + width, self.width)\n y2 = min(y_off + height, self.height)\n\n # Paste location is totally outside image\n if x1 >= x2 or y1 >= y2:\n return\n\n # Overlap rectangle in source image coordinates\n sx1 = x1 - x_off\n sy1 = y1 - y_off\n sx2 = x2 - x_off\n sy2 = y2 - y_off\n\n # Perform paste\n target = self.img\n source = src.img\n alpha = 3\n\n if self.channels == 4 and src.channels == 4:\n # Use alpha blending\n for c in range(0, 3):\n target[y1:y2, x1:x2, c] = source[sy1:sy2, sx1:sx2, c] * (source[sy1:sy2, sx1:sx2, alpha] / 255.0) \\\n + target[y1:y2, x1:x2, c] * (1.0 - source[sy1:sy2, sx1:sx2, alpha] / 255.0)\n\n target[y1:y2, x1:x2, alpha] = np.full((y2-y1, x2-x1), 255, np.uint8)\n\n else:\n # No alpha blending\n target[y1:y2, x1:x2] = src.img[sy1:sy2, sx1:sx2]",
"def paste(self, other):\n r, g, b, alpha = other.pil_image.split()\n pil_image = self.pil_image.copy()\n pil_image.paste(other.pil_image, mask=alpha)\n return kurt.Image(pil_image)",
"def set_blend_mode_plus(self):\n self.image_item.setCompositionMode(QtGui.QPainter.CompositionMode_Plus)",
"def _image_paste(self, image, dest_image, pos_x, pos_y):\n height, width = image.shape[:2]\n dest_image[pos_y:(pos_y + height), pos_x:(pos_x + width)] = image",
"def Compact(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Compact(self, *args)"
] |
[
"0.74177617",
"0.7093004",
"0.70563364",
"0.64599884",
"0.64136696",
"0.6405975",
"0.6344653",
"0.63076115",
"0.623741",
"0.62366575",
"0.6189054",
"0.6123106",
"0.6010139",
"0.59794265",
"0.5974006",
"0.59513146",
"0.5915312",
"0.5914474",
"0.58987004",
"0.58516055",
"0.58360004",
"0.5816501",
"0.58080554",
"0.5797416",
"0.57529086",
"0.5692478",
"0.5666619",
"0.5665812",
"0.5625776",
"0.5622694"
] |
0.75862175
|
0
|
Returns a histogram for the image. The histogram is returned as a list of pixel counts, one for each pixel value in the source image. If the image has more than one band, the histograms for all bands are concatenated (for example, the histogram for an "RGB" image contains 768 values). A bilevel image (mode "1") is treated as a greyscale ("L") image by this method. If a mask is provided, the method returns a histogram for those parts of the image where the mask image is nonzero. The mask image must have the same size as the image, and be either a bilevel image (mode "1") or a greyscale image ("L").
|
def histogram(self, mask=None, extrema=None):
uni, counts = self._getcolors()
return [l for l in counts]
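# Hedged usage note: unlike the Pillow behaviour described above, this variant
# accepts but ignores `mask` and `extrema`, and the counts appear to come from
# `_getcolors()` (one count per distinct colour) rather than fixed 256-value bands.
#
#   counts = img.histogram()
#   total_pixels = sum(counts)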
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def describe(self, image, mask=None):\n histogram = cv2.calcHist([image], [0, 1, 2], mask, self.bins, [0, 256, 0, 256, 0, 256])\n cv2.normalize(histogram, histogram)\n\n return histogram.flatten()",
"def histograms(self, *args, **kwargs):\n return _image.image_histograms(self, *args, **kwargs)",
"def histogram(self, image):\n\n response = self._send_request(\"histogram\", files=dict(image=image))\n return response[self._layer]['histogram']",
"def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist",
"def calc_histogram(self, img_data):\n\n histogram = [0] * self.color_depth\n\n for w in range(img_data.shape[0]):\n for h in range(img_data.shape[1]):\n pixel = img_data[w][h]\n histogram[pixel] += 1\n\n return histogram",
"def compute_histogram(self, image):\n\n hist = [0] * 256\n x, y = image.shape[:2]\n #print(image.shape)\n for i in range(x):\n for j in range(y):\n hist[image[i, j]] += 1\n\n return hist",
"def compute_histogram(self, image):\n hist = [0] * 256\n [h, w] = image.shape\n print(h,w)\n i = 0\n while i < 256:\n for row in range(h):\n for col in range(w):\n if image[row, col] == i:\n hist[i] += 1\n #print(hist[i])\n i += 1\n\n return hist",
"def compute_histogram(image, n_bins, color_space=\"RGB\"):\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n\n hist_channels = list(range(n_channels))\n hist_bins = [n_bins,]*n_channels\n hist_range = [0, 256]*n_channels\n\n hist = cv.calcHist([image], hist_channels, None, hist_bins,\n hist_range)\n hist = cv.normalize(hist, hist, alpha=0, beta=1,\n norm_type=cv.NORM_MINMAX).flatten() # change histogram range from [0,256] to [0,1]\n return hist",
"def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)",
"def _histogram(image,\n min,\n max,\n bins):\n\n return numpy.histogram(image, bins, (min, max))[0]",
"def histogram(self):\n if np.size(self.stats['Counts']): # don't do anything to an empty list\n if np.size(self.bins) and not self.redo:\n return self.bins, self.occs, self.thresh\n elif np.size(self.bin_array) > 0: \n self.occs, self.bins = np.histogram(self.stats['Counts'], self.bin_array) # fixed bins. \n else:\n try:\n lo, hi = min(self.stats['Counts'])*0.97, max(self.stats['Counts'])*1.02\n # scale number of bins with number of files in histogram and with separation of peaks\n num_bins = int(15 + self.ind//100 + (abs(hi - abs(lo))/hi)**2*15) \n self.occs, self.bins = np.histogram(self.stats['Counts'], bins=np.linspace(lo, hi, num_bins+1)) # no bins provided by user\n except: \n self.occs, self.bins = np.histogram(self.stats['Counts'])\n else: self.occs, self.bins = np.zeros(10), np.arange(0,1.1,0.1)\n return self.bins, self.occs, self.thresh",
"def get_histogram(folder_name, image_name, save_location):\n print(\"Getting histogram for:\" + str(folder_name) + '/' + str(image_name))\n image = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.xlabel('Pixel Intensity')\n plt.ylabel('Number of pixels')\n plt.title('Histogram of normalised reference image. Overnight2')\n plt.savefig(save_location + 'histogram.png')\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()",
"def calculate_histogram(img, channel):\n\n # histogram arrays for each channel\n hist_gs_or_red = np.zeros((256, 1), dtype=np.int32)\n hist_green = np.zeros((256, 1), dtype=np.int32)\n hist_blue = np.zeros((256, 1), dtype=np.int32)\n\n # Calculate the histogram for red channel for RGB images\n # or the the first channel for gray-scale of shape (M, N, 1) images.\n if channel == [0]:\n # one-dimensional array\n if img.ndim == 1:\n raise Exception('Cannot calculate the hist of one-dimensional array.')\n\n # if there is one channel, or in case of gray-scale images, it's OK!\n elif img.ndim == 2:\n for pixel in np.ceil(img.flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # an RGB image\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 0:1].flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n\n return hist_gs_or_red\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [1]:\n # Not 3-D array that represent the image with 3 color channels.\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of green channel for non-rgb images/ 3-D array')\n\n # If it's a 3-D array of 3 color channels\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 1:2].flatten()).astype(np.int):\n hist_green[pixel] = hist_green[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n return hist_green\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [2]:\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of blue channel for non-rgb images/ 3-D array')\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 2:].flatten()).astype(np.int):\n hist_blue[pixel] = hist_blue[pixel] + 1\n return hist_blue\n\n # Invalid value of channel parameter\n else:\n raise Exception('ValueError: only [0], [1], [2] are possible as value for the channel parameter.')",
"def show_histogram(im):\n\n if im.ndim == 2:\n # Input image is single channel\n plt.hist(im.flatten(), 256, range=(0, 250), fc='k')\n plt.show()\n\n elif im.ndim == 3:\n # Input image is three channels\n fig = plt.figure()\n fig.add_subplot(311)\n plt.hist(im[..., 0].flatten(), 256, range=(0, 250), fc='b')\n fig.add_subplot(312)\n plt.hist(im[..., 1].flatten(), 256, range=(0, 250), fc='g')\n fig.add_subplot(313)\n plt.hist(im[..., 2].flatten(), 256, range=(0, 250), fc='r')\n plt.show()",
"def extract_histogram(self, bins=10):\n assert len(self.images) > 0, 'No images loaded! Did you call ' \\\n 'load_images() ?'\n histograms = []\n for image in self.images:\n grey = skicol.rgb2gray(image)\n hist_values, bins = np.histogram(grey, range=(0, 1), bins=bins)\n histograms.append(hist_values)\n histograms = np.array(histograms)\n histograms = histograms.astype('float')\n return histograms",
"def OF1_CalculateRawHistogram(image):\n h = np.zeros(256, np.float_)\n for i in np.nditer(image):\n h[i - 1] = h[i - 1] + 1\n\n return h",
"def histogram_image(image, method='rgb'):\n histogram = get_color_histogram(image)\n if method == 'hex':\n return {'#{:02X}{:02X}{:02X}'.format(*color): count\n for color, count in histogram.items()}\n else:\n return {'[{}, {}, {}]'.format(r, g, b): count\n for (r, g, b), count in histogram.items()}",
"def component_histograms(image, transform=None):\n if transform == None:\n transform = lambda x: x\n\n shape = image.shape\n assert len(shape) == 3\n _, _, num_comp = shape\n return [np.histogram(transform(image[:, :, [idx]]).flatten(),\n bins=NUM_HIST_BINS, range=(0, 256))[0]\n for idx in xrange(0, num_comp)]",
"def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()",
"def calculateHistogram(self):\n \n # Define color map\n colors = [ (255,0,0),(0,255,0),(0,0,255) ]\n # Define empty image to plot histogram in\n plot_to_fill = np.zeros((280,400,3))\n # Define bins of the histogram\n bins = np.arange(256).reshape(256,1)\n \n # Boucle sur les canaux\n for channel, color in enumerate(colors):\n # Calcul de l'histogramme\n hist_item = cv2.calcHist(self.frame,[channel],None,[256],[0,256])\n # Normalisation\n cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)\n # Conversion\n hist = np.int32(np.around(hist_item))\n pts = np.int32(np.column_stack((bins, hist)))\n cv2.polylines(plot_to_fill, [pts], False, color)\n # Mettre dans le bon sens\n histplot = np.flipud(plot_to_fill)\n histplot = np.uint8(histplot)\n \n # Conversion en objet QPixelMap\n self.histplot_qpix = self.convertToQPixelmap(histplot)",
"def hist(img):\n bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half\n histogram = bottom_half.sum(axis=0) \n \n return histogram",
"def sym_histogram(self, X, mask=None):\n distances = euclidean_distance(X, self.V)\n membership = T.nnet.softmax(-distances / self.g ** 2)\n\n if mask is not None:\n histogram = membership * T.reshape(mask, (mask.shape[0], 1))\n histogram = T.sum(histogram, axis=0) / T.sum(mask, axis=0)\n else:\n histogram = T.mean(membership, axis=0)\n return histogram",
"def bins_from_image(path):\n _, Y = util.imread(path)\n closest_bins = util.closest_bins(Y).flatten()\n count = np.zeros(len(util.bins))\n for closest_bin in closest_bins:\n count[closest_bin] += 1\n return count",
"def getImstats(images, masks=None, bins=range(257)):\n # Dict for appending results to\n stats = dict()\n stats['bins'] = bins\n\n # Load images and masks\n images, masks = _load_images_and_masks(images, masks)\n\n # Calculate stats\n stats['meanVec'] = np.empty(len(images))\n stats['stdVec'] = np.empty(len(images))\n stats['histArr'] = np.empty((len(images), len(bins)-1))\n for i, (im, m) in enumerate(zip(images, masks)):\n tmp = im[m] if m is not None else im.flatten()\n stats['meanVec'][i] = tmp.mean()\n stats['stdVec'][i] = tmp.std()\n stats['histArr'][i,:] = np.histogram(tmp, bins=bins)[0]\n\n # Calculate means\n stats['meanLum'] = stats['meanVec'].mean()\n stats['meanStd'] = stats['stdVec'].mean()\n stats['meanHist'] = stats['histArr'].mean(axis=0)\n\n # Return\n return stats",
"def img_histogram(img):\n\n plt.figure()\n\n if len(img.shape) > 2:\n\n plt.subplot(3,1,1)\n plt.hist(img[:,:,0].ravel(),bins=range(257),color='b')\n plt.title('Image Histogram')\n plt.legend('Blue')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,2)\n plt.hist(img[:,:,1].ravel(),bins=range(257),color='g')\n plt.legend('Green')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,3)\n plt.hist(img[:,:,2].ravel(),bins=range(257),color='r')\n plt.legend('Red')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()\n\n else:\n\n plt.hist(img[:,:].ravel(),bins=range(257))\n plt.title('Image Histogram - Grayscale')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()",
"def color_histogram_hsv(img, nbin=10, xmin=0, xmax=255, normalized=True):\n ndim = img.ndim\n bins = np.linspace(xmin, xmax, nbin+1)\n hsv = matplotlib.color.rgb_to_hsv(img/xmax) * xmax\n imhist, bin_edges = np.histogram(hsv[:, :, 0], bins=bins, density=normalized)\n imhist = imhist * np.diff(bin_edges)\n return imhist",
"def __get_color_histogram(self, image, seed, hist_res):\n \n L=[]\n N=len(seed)\n for i in range(N):\n \n L.append(image[seed[i][1],seed[i][0]])\n image_part=np.array(L)\n \n \n hist, bins= np.histogramdd(image_part,bins=hist_res,range=((0,255),(0,255),(0,255)) )\n #hist= ndimage.gaussian_filter(hist,sigma=7) # Gaussian smoothing\n\n return hist /np.linalg.norm(hist)",
"def hog_histograms(*args, **kwargs): # real signature unknown\n pass",
"def rgb_histogram(img, channels=[\"r\", \"g\", \"b\"]):\n hist = {}\n for ii, color in enumerate(channels):\n hist[color] = cv2.calcHist([img], [ii], None, [256], [0, 256])\n return hist",
"def histogram_to_image_filter(*args, **kwargs):\n import itk\n instance = itk.HistogramToImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()"
] |
[
"0.69900167",
"0.6610396",
"0.6552293",
"0.6299862",
"0.61408544",
"0.6139109",
"0.6043549",
"0.60427636",
"0.60298526",
"0.595549",
"0.5829149",
"0.5772954",
"0.57710135",
"0.5740489",
"0.57032293",
"0.5678399",
"0.567833",
"0.56576544",
"0.56424284",
"0.559242",
"0.557964",
"0.5527882",
"0.55187917",
"0.5482106",
"0.54447854",
"0.5430487",
"0.54161054",
"0.5414481",
"0.5402565",
"0.53488845"
] |
0.6618615
|
1
|
Adds or replaces the alpha layer in this image. If the image does not have an alpha layer, it's converted to "LA" or "RGBA". The new layer must be either "L" or "1".
|
def putalpha(self, alpha):
channels, depth = self._get_channels_and_depth(self._mode)
if isinstance(alpha, np.ndarray):
paste_image = True
else:
paste_image = False
if channels==4:
r, g, b, a = self.split()
if not paste_image:
a[:] = alpha
else:
a = alpha.copy()
colorband = (r, g, b, a)
self._instance = merge("RGBA", colorband, image=True)
elif channels == 3:
if not paste_image:
sh = self._instance.shape
sh = (sh[0], sh[1], 1)
a = np.zeros(sh, dtype=depth)
a[:] = alpha
else:
a = alpha.copy()
r, g, b = self.split()
colorband = (r, g, b, a)
self._instance = merge("RGBA", colorband, image=True)
elif channels < 2: # "L" or "LA"
if not paste_image:
sh = self._instance.shape
sh = (sh[0], sh[1], 1)
a = np.zeros(sh, dtype=depth)
a[:] = alpha
else:
a = alpha.copy()
if channels == 2:
l, a_old = self.split()
colorband = (l, a)
else:
colorband = (self._instance, a)
self._instance = merge("LA", colorband, image=True)
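# Hedged usage sketch: per the isinstance check above, `alpha` may be a scalar
# fill value or a numpy array of matching size (`mask_array` is illustrative).
# Note the `channels < 2` branch can never reach its inner `channels == 2` case,
# so two-channel "LA" input appears to fall through unhandled here.
#
#   img.putalpha(128)          # constant ~50% alpha band
#   img.putalpha(mask_array)   # reuse an existing single-band array as alpha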
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def overlay_alpha_layers(layers, keepalpha=True, dtype=np.float32):\n layer_iter = iter(layers)\n img1 = next(layer_iter)\n rgb1, alpha1 = _prep_rgb_alpha(img1, dtype=dtype)\n\n for img2 in layer_iter:\n rgb2, alpha2 = _prep_rgb_alpha(img2, dtype=dtype)\n rgb1, alpha1 = _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2)\n\n if keepalpha:\n raster = np.dstack([rgb1, alpha1[..., None]])\n else:\n raster = rgb1\n return raster",
"def add_layer(self, tilemanager, opacity=1.0):\n assert has_pil, _(\"Cannot blend layers without python PIL\")\n assert self.tile_size == tilemanager.tile_size, _(\"Cannot blend layers whose tile size differs\")\n assert 0 <= opacity <= 1, _(\"Opacity should be between 0.0 (transparent) and 1.0 (opaque)\")\n self.cache.basename += '%s%.1f' % (tilemanager.cache.basename, opacity)\n self._layers.append((tilemanager, opacity))",
"def add_layer(self, tilemanager, opacity=1.0):\n assert has_pil, _(\"Cannot blend layers without python PIL\")\n assert self.tile_size == tilemanager.tile_size, _(\"Cannot blend layers whose tile size differs\")\n assert 0 <= opacity <= 1, _(\"Opacity should be between 0.0 (transparent) and 1.0 (opaque)\")\n self.cache.basename += '%s%.1f' % (tilemanager.cache.basename, opacity)\n self._layers.append((tilemanager, opacity))",
"def add_alpha(image_data):\n\n # get hsv image\n hsv = rgb_to_hsv(image_data[:, :, :3].astype(float) / 255)\n\n # create new image and set alpha channel\n new_image_data = np.zeros(image_data.shape)\n new_image_data[:, :, 3] = hsv[:, :, 2]\n\n # set value of hsv image to either 0 or 1.\n hsv[:, :, 2] = np.where(hsv[:, :, 2] > 0, 1, 0)\n\n # combine alpha and new rgb\n new_image_data[:, :, :3] = hsv_to_rgb(hsv)\n return new_image_data",
"def overlay_alpha_images(img1, img2, keepalpha=True, dtype=np.float32,\n impl='inplace'):\n rgb1, alpha1 = _prep_rgb_alpha(img1, dtype=dtype)\n rgb2, alpha2 = _prep_rgb_alpha(img2, dtype=dtype)\n\n # Perform the core alpha blending algorithm\n if impl == 'simple':\n rgb3, alpha3 = _alpha_blend_simple(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'inplace':\n rgb3, alpha3 = _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'numexpr1':\n rgb3, alpha3 = _alpha_blend_numexpr1(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'numexpr2':\n rgb3, alpha3 = _alpha_blend_numexpr2(rgb1, alpha1, rgb2, alpha2)\n else:\n raise ValueError('unknown impl={}'.format(impl))\n\n if keepalpha:\n raster = np.dstack([rgb3, alpha3[..., None]])\n # Note: if we want to output a 255 img we could do something like this\n # out = np.zeros_like(img1)\n # out[..., :3] = rgb3\n # out[..., 3] = alpha3\n else:\n raster = rgb3\n return raster",
"def alpha_extend(color: C3I, alpha: int = 255) -> C4I:\n return (*color, alpha)",
"def layer_blend(foreground, background, foreground_alpha=.6):\n cv2.addWeighted(foreground, foreground_alpha,\n background, 1 - foreground_alpha, 0, background)\n\n return background",
"def set_alpha(self, value: Optional[int], flags: int = 0) -> 'BaseImage':\n if value is None:\n self._surface.set_alpha(None)\n return self\n assert isinstance(value, int)\n assert 0 <= value <= 255, 'alpha value must be an integer between 0 and 255'\n self._surface.set_alpha(value, flags)\n return self",
"def alpha(cls, rgb_color, transparency):\n\n if transparency > 1:\n transparency = 1\n elif transparency < 0:\n transparency = 0\n return rgb_color + str(hex(int(254 * transparency)))[2:]",
"def to_alpha(self):\n if self.channels == 3:\n alpha = opencv.cvtColor(self.img, opencv.COLOR_BGR2BGRA)\n return Image(alpha)\n elif self.channels == 1:\n alpha = opencv.cvtColor(self.img, opencv.COLOR_GRAY2BGRA)\n return Image(alpha)\n else:\n return Image(self.img)",
"def add_image(self, image_name, alpha=1):\n self.image_name = image_name\n self.image_alpha = alpha",
"def setAlpha ( self, newalpha ):\n if isinstance( newalpha, int ):\n raise ValueError('Expects a float value in the [ 0.0 - 1.0 ] range!')\n if newalpha > 1.0:\n newalpha = 1.0\n if newalpha < 0.0:\n newalpha = 0.0\n self.a = newalpha\n self.hsla[3] = newalpha\n self.rgba[3] = newalpha",
"def _set_transparency(self, transparency, elm):\n a = str(100 - transparency) + '196'\n \n alpha = OxmlElement('a:alpha')\n alpha.set('val', a)\n elm.srgbClr.append(alpha)",
"def apply_alpha(self, background=\"#000000FF\"):\n\n def tx_alpha(cf, af, cb, ab):\n \"\"\"Translate the color channel with the alpha channel and background channel color.\"\"\"\n\n return round_int(\n abs(\n cf * (af * RGB_CHANNEL_SCALE) + cb * (ab * RGB_CHANNEL_SCALE) * (1 - (af * RGB_CHANNEL_SCALE))\n )\n ) & 0xFF\n\n if self.a < 0xFF:\n r, g, b, a = self._split_channels(background)\n\n self.r = tx_alpha(self.r, self.a, r, a)\n self.g = tx_alpha(self.g, self.a, g, a)\n self.b = tx_alpha(self.b, self.a, b, a)\n\n return self.get_rgb()",
"def layer_overlay(foreground, background):\n overlaid = foreground.copy()\n negative_space = np.where(foreground[:, :, 3] == 0)\n\n overlaid[negative_space] = background[negative_space]\n\n overlaid[:, :, 3] = 255\n\n return overlaid",
"def SetAlpha(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUS_SetAlpha(self, *args)",
"def alpha_blend(input_image, segmentation_mask, alpha=0.5):\n blended = np.zeros(input_image.size, dtype=np.float32)\n blended = input_image * alpha + segmentation_mask * (1 - alpha)\n return blended",
"def opacity(im,alpha):\n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n else:\n im = im.copy()\n alphachannel = im.split()[3]\n alphachannel = ImageEnhance.Brightness(alphachannel).enhance(alpha)\n im.putalpha(alphachannel)\n return im",
"def overlay_image_alpha(self,img, img_overlay, pos, alpha_mask):\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])",
"def _blend(img1, img2, alpha):\n return img1.mul(alpha).add(1 - alpha, img2)",
"def set_alpha(self, alpha):\n if alpha < 0 or alpha > 255:\n raise ValueError(\"alpha must be betweeen 0 and 255\")\n\n self.alpha = alpha\n self.draw_alpha()",
"def _blend_layers(self, imagecontent, (z, x, y)):\n result = self._tile_image(imagecontent)\n # Paste each layer\n for (layer, opacity) in self._layers:\n try:\n # Prepare tile of overlay, if available\n overlay = self._tile_image(layer.tile((z, x, y)))\n except (DownloadError, ExtractionError), e:\n logger.warn(e)\n continue\n # Extract alpha mask\n overlay = overlay.convert(\"RGBA\")\n r, g, b, a = overlay.split()\n overlay = Image.merge(\"RGB\", (r, g, b))\n a = ImageEnhance.Brightness(a).enhance(opacity)\n overlay.putalpha(a)\n mask = Image.merge(\"L\", (a,))\n result.paste(overlay, (0, 0), mask)\n # Read result\n return self._image_tile(result)",
"def setTransparency(self, transparency):\n self.render_context.alpha_mode = transparency",
"def set_alpha(self, alpha=1.0):\r\n self.unif[17] = alpha",
"def alpha_extend_and_scale_to_01(color: C3I, alpha: int = 255) -> C4F:\n r, g, b = color\n return r / 255, g / 255, b / 255, alpha / 255",
"def draw_alpha(self):\n if self.alpha == 255:\n self.current_sprite_alpha = self.current_sprite\n else:\n mask = pygame.Surface(self.current_sprite.get_size(), flags=pygame.SRCALPHA)\n mask.fill((255, 255, 255, self.alpha))\n self.current_sprite_alpha = self.current_sprite.copy()\n self.current_sprite_alpha.blit(mask, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)",
"def add_alpha_helix(self, alpha_helix):\n assert self.default_model is not None\n self.default_model.add_alpha_helix(alpha_helix)",
"def _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2):\n rgb3 = np.empty_like(rgb1)\n temp_rgb = np.empty_like(rgb1)\n alpha3 = np.empty_like(alpha1)\n temp_alpha = np.empty_like(alpha1)\n\n # hold (1 - alpha1)\n np.subtract(1, alpha1, out=temp_alpha)\n\n # alpha3\n np.copyto(dst=alpha3, src=temp_alpha)\n np.multiply(alpha2, alpha3, out=alpha3)\n np.add(alpha1, alpha3, out=alpha3)\n\n # numer1\n np.multiply(rgb1, alpha1[..., None], out=rgb3)\n\n # numer2\n np.multiply(alpha2, temp_alpha, out=temp_alpha)\n np.multiply(rgb2, temp_alpha[..., None], out=temp_rgb)\n\n # (numer1 + numer2)\n np.add(rgb3, temp_rgb, out=rgb3)\n\n # removing errstate is actually a significant speedup\n with np.errstate(invalid='ignore'):\n np.divide(rgb3, alpha3[..., None], out=rgb3)\n if not np.all(alpha3):\n rgb3[alpha3 == 0] = 0\n return rgb3, alpha3",
"def SetAlpha(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUC_SetAlpha(self, *args)",
"def _blend_layers(self, imagecontent, (z, x, y)):\n result = self._tile_image(imagecontent)\n # Paste each layer\n for (layer, opacity) in self._layers:\n try:\n # Prepare tile of overlay, if available\n overlay = self._tile_image(layer.tile((z, x, y)))\n except (DownloadError, ExtractionError), e:\n logger.warn(e)\n continue\n # Extract alpha mask\n overlay = overlay.convert(\"RGBA\")\n r, g, b, a = overlay.split()\n overlay = Image.merge(\"RGB\", (r, g, b))\n a = ImageEnhance.Brightness(a).enhance(opacity)\n overlay.putalpha(a)\n mask = Image.merge(\"L\", (a,))\n result.paste(overlay, (0, 0), mask)\n # Read result\n return self._image_tile(result)"
] |
[
"0.68926454",
"0.66775626",
"0.6642565",
"0.6370327",
"0.6348562",
"0.6272967",
"0.6214976",
"0.61931974",
"0.6173027",
"0.61116505",
"0.60668963",
"0.60490817",
"0.60149103",
"0.59393996",
"0.5928146",
"0.58661455",
"0.581233",
"0.58113575",
"0.5784807",
"0.57517695",
"0.5744461",
"0.57319784",
"0.570979",
"0.57062495",
"0.56973535",
"0.569398",
"0.5689959",
"0.5642135",
"0.55947435",
"0.55726963"
] |
0.6711814
|
1
|
Copies pixel data to this image. This method copies data from a sequence object into the image, starting at the upper left corner (0, 0), and continuing until either the image or the sequence ends. The scale and offset values are used to adjust the sequence values: pixel = value * scale + offset.
|
def putdata(self, dat, scale=1.0, offset=0.0):
data = np.array(dat)
data = data * scale + offset
channels, depth = self._get_channels_and_depth(self._mode)
siz = self.size
_im = np.ravel(self._instance)
data = data[:len(_im)]
        _im[:len(data)] = data
self._instance = _im.reshape((siz[1], siz[0], channels))
self._instance = self._instance.astype(depth)
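A small standalone sketch of the scale/offset step on a flat pixel buffer, using arbitrary example values (the names seq, scaled and target are illustrative only):

import numpy as np

seq = [0, 1, 2, 3]                      # incoming sequence object
scaled = np.array(seq) * 2.0 + 10.0     # pixel = value * scale + offset
target = np.zeros(8)                    # flattened image buffer
target[:len(scaled)] = scaled           # copy until the sequence ends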
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def draw(self, frame, offset=OFS):\n frame[\n OFS : OFS + self.image.shape[0], OFS : OFS + self.image.shape[1]\n ] = self.image",
"def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()",
"def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)",
"def image(self, img):\n # determine our effective width/height, taking rotation into account\n width = self.width\n height = self.height\n if self.rotation in (1, 3):\n width, height = height, width\n\n if isinstance(self.format, (RGB565Format, RGB888Format)) and img.mode != \"RGB\":\n raise ValueError(\"Image must be in mode RGB.\")\n if isinstance(self.format, (MHMSBFormat, MVLSBFormat)) and img.mode != \"1\":\n raise ValueError(\"Image must be in mode 1.\")\n\n imwidth, imheight = img.size\n if imwidth != width or imheight != height:\n raise ValueError(\n f\"Image must be same dimensions as display ({width}x{height}).\"\n )\n # Grab all the pixels from the image, faster than getpixel.\n pixels = img.load()\n # Clear buffer\n for i in range(len(self.buf)): # pylint: disable=consider-using-enumerate\n self.buf[i] = 0\n # Iterate through the pixels\n for x in range(width): # yes this double loop is slow,\n for y in range(height): # but these displays are small!\n if img.mode == \"RGB\":\n self.pixel(x, y, pixels[(x, y)])\n elif pixels[(x, y)]:\n self.pixel(x, y, 1) # only write if pixel is true",
"def _image_paste(self, image, dest_image, pos_x, pos_y):\n height, width = image.shape[:2]\n dest_image[pos_y:(pos_y + height), pos_x:(pos_x + width)] = image",
"def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step",
"def step(self,image):\r\n\r\n\t\tself.gray = image",
"def paint(self, image, position):\n \n where = list(numpy.transpose(self.indices(position)))\n image[where] = self._value\n image.modified()",
"def draw(self, frame):\n frame[OFS:OFS+self.image.shape[0], OFS:OFS+self.image.shape[1]] = self.image",
"def CopyImageMem(self):\r\n r = CALL(\"CopyImageMem\",self,self.image,self.id,self.data.ctypes.data)\r\n return self.CheckForSuccessError(r)",
"def paste(self, image, xy=(0,0)):\n # Parse xy location from any type of unit to pixels\n x,y = xy\n x = units.parse_dist(x,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n y = units.parse_dist(y,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n xy = (x,y)\n # Need more options, eg anchor point, and coordinate xy\n self.drawer.flush()\n if isinstance(image, Canvas): image = image.img\n if image.mode == \"RGBA\":\n self.img.paste(image, xy, image) # paste using self as transparency mask\n else: self.img.paste(image, xy)\n self.update_drawer_img()\n return self",
"def calcdata(self):\n bot = max(0, self.r - self.radius)\n top = min(self.img.shape[0], self.r + self.radius)\n left = max(0, self.c - self.radius)\n right = min(self.img.shape[1], self.c + self.radius)\n self.data = self.img[bot:top, left:right]",
"def draw(self, frame):\n xpos = OFS + self.x * TILE_SIZE\n ypos = OFS + self.y * TILE_SIZE\n frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image",
"def image(self, image):\n if image.mode != '1':\n raise ValueError('Image must be in mode 1.')\n imwidth, imheight = image.size\n if imwidth != self.width or imheight != self.height:\n raise ValueError('Image must be same dimensions as display ({0}x{1}).' \\\n .format(self.width, self.height))\n # Grab all the pixels from the image, faster than getpixel.\n pix = image.load()\n # Iterate through the memory pages\n index = 0\n for page in range(self._pages):\n # Iterate through all x axis columns.\n for x in range(self.width):\n # Set the bits for the column of pixels at the current position.\n bits = 0\n # Don't use range here as it's a bit slow\n for bit in [0, 1, 2, 3, 4, 5, 6, 7]:\n bits = bits << 1\n bits |= 0 if pix[(x, page*8+7-bit)] == 0 else 1\n # Update buffer byte and increment to next byte.\n self._buffer[index] = bits\n index += 1",
"def setPixel(self, value, position):\n (x,y,z) = position\n if z<0 or z>=self.length:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_SIZE)\n err = mambaCore.MB_PutPixel(self.seq[z].mbIm, value, position[0], position[1])\n mamba.raiseExceptionOnError(err)",
"def update_img(self):\n self.img = np.array(self.image)",
"def cv2_clipped_zoom_sequence(sequence, zoom_factor):\n height, width = sequence.shape[1:] # It's also the final desired shape\n new_height, new_width = int(height * zoom_factor), int(width * zoom_factor)\n\n # Handle padding when downscaling\n resize_height, resize_width = new_height, new_width\n pad_height1, pad_width1 = (height - resize_height) // 2, (width - resize_width) //2\n pad_height2, pad_width2 = (height - resize_height) - pad_height1, (width - resize_width) - pad_width1\n pad_spec = [(max(pad_height1, 0), max(pad_height2, 0)), (max(pad_width1, 0), max(pad_width2, 0))]\n\n result = np.empty((np.shape(sequence)[0], np.shape(sequence)[1], np.shape(sequence)[2]))\n\n for i, image in enumerate(sequence):\n resized_image = cv2.resize(image, (resize_width, resize_height))\n if pad_height1 > 0 and pad_width1 > 0:\n result[i] = np.pad(resized_image, pad_spec, mode='constant', constant_values=1)\n elif pad_height1 < 0 and pad_width1 < 0:\n result[i] = resized_image[-pad_height1:pad_height2, -pad_width1:pad_width2]\n else: \n result[i] = image\n return result",
"def shift(self):\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.size - self.mid_pixel - r\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n image_shift = np.roll(self.image,shift=x,axis=0)\n self.image = np.roll(image_shift,shift=y,axis=1)\n \n return",
"def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm",
"def _image_paste(self, image, dest_image, pos_x, pos_y):\n raise NotImplementedError",
"def process(self, image, annotation_meta=None):\n # image dasta stored inside DataRepresentation in data field\n data = image.data\n # internally we work with numpy arrays, so we need to convert it to pillow image object for making resize\n resized_data = Image.fromarray(data).resize((self.size, self.size), Image.ANTIALIAS)\n # return back data to numpy array\n data = np.array(resized_data)\n # expand dims for gray scale image\n if len(data.shape) == 2:\n data = np.expand_dims(data, axis=-1)\n image.data = data\n # return updated DataRepresentation\n return image",
"def paste(bg_img, img, x_offset, y_offset):\n y1, y2 = y_offset, y_offset + img.shape[0]\n x1, x2 = x_offset, x_offset + img.shape[1]\n\n c = img.shape[2]\n try:\n bg_img[y1:y2, x1:x2, 0:c] = img[:, :, 0:c]\n except IndexError:\n log.error(\"index exception.\")\n return bg_img",
"def update_image(self):\n self.image = Image.fromarray(self.img)",
"def read(self):\n try:\n if self.Data.Sync.IsWritten == 1:\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0\n\n Width = self.Data.Image.ImageWidth\n Height = self.Data.Image.ImageHeight\n\n # Image = np.fromstring(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = np.frombuffer(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = Image.reshape(Height, Width, self.TARGET_IMAGE_CHANNELS)\n\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n Image = cv2.resize(Image, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth/2 - self._TargetResolution[0]/2)\n XStop = int(TargetWidth/2 + self._TargetResolution[0]/2)\n Image = Image[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0]/AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n Image = cv2.resize(Image, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight/2 - self._TargetResolution[1]/2)\n YStop = int(TargetHeight/2 + self._TargetResolution[1]/2)\n Image = Image[YStart:YStop, :]\n\n # Shall we convert this to 0 - 1 ?\n self._RawImage = Image\n self._Image = cv2.flip(Image, 0)\n\n # This one does not flip the image, but it rotate and crop !!\n # self._Image = np.array(cv2.flip(Image, 0)/255, dtype=np.float32)\n # self._Image = cv2.flip(Image, 0)\n\n\n # This one is flipped upside/down\n # print(\"Image from memory reshaped as WxH with Mean\", Width, Height, np.mean((self._Image), axis=(0, 1)))\n # self.store_to_file(self._Image)\n\n return True\n except:\n print(\"Unexpected error in Shared Memory Read\", sys.exc_info()[0])\n\n return False",
"def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target",
"def __init__(self, image, scr, view_point, cols=1, rows=1):\r\n self.z = 1\r\n super().__init__(image, scr, view_point, cols, rows)\r\n self.frame = randint(0, self.get_max_frame())",
"def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)",
"def update_image(self):\n if self.first is None or self.second is None:\n LOG.warn(\"No images set yet\")\n else:\n pos = self.slider.value()\n moved = np.roll(self.second, self.second.shape[0] / 2 - pos, axis=0)\n self.image_item.setImage(moved - self.first)",
"def plot_image_sequence(self):\r\n\r\n imv = pg.ImageView()\r\n\r\n imv.show()\r\n\r\n imv.setImage(self.imageData)\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 1, 0)",
"def update(self, i):\n data = next(self.stream)\n # Set x and y data...\n self.scat.set_offsets(np.concatenate([data[0], data[1]]).reshape((11, 2)))\n return self.scat,"
] |
[
"0.5834029",
"0.5822663",
"0.55779636",
"0.5547315",
"0.554644",
"0.5539611",
"0.5520755",
"0.55167884",
"0.55137485",
"0.54911315",
"0.5476981",
"0.5425262",
"0.5422462",
"0.53885484",
"0.5386036",
"0.538219",
"0.5372337",
"0.5331992",
"0.53313965",
"0.53288794",
"0.5293788",
"0.52936405",
"0.5261505",
"0.5248053",
"0.5221144",
"0.52205014",
"0.52114457",
"0.5211014",
"0.5196587",
"0.51935285"
] |
0.5909142
|
0
|
Seeks to the given frame in this sequence file. If you seek beyond the end of the sequence, the method raises an EOFError exception. When a sequence file is opened, the library automatically seeks to frame 0. Note that in the current version of the library, most sequence formats only allow you to seek to the next frame.
|
def seek(self, frame):
    if frame >= self.n_frames:
raise EOFError("Frame number is beyond the number of frames")
else:
self._frame_nr = frame
self._instance = self.frames[frame]
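A standalone sketch of the same bounds check over an in-memory list of frames (the frames list and helper name below mirror the behaviour of the method above but are otherwise assumptions):

frames = ["f0", "f1", "f2"]

def seek_frame(frame_nr):
    if frame_nr >= len(frames):
        raise EOFError("Frame number is beyond the number of frames")
    return frames[frame_nr]

assert seek_frame(0) == "f0"
try:
    seek_frame(len(frames))             # one past the last frame
except EOFError:
    pass                                # raised as documented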
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def current_frame(self, n):\n self.sound.seek(n)\n self._current_frame = n",
"def advance_in_file(self, file_pos):\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise",
"def next_frame(self, save_index=True):\n if len(self._frames) > self._findex + 1:\n self._findex += 1\n frame_start = self._findex * self._flen\n if not save_index:\n self._index = frame_start\n else:\n if self._index + self._flen <= len(self) - 1 and save_index:\n self._index += self._flen\n else:\n self._index = frame_start + len(self.frame) - 1\n return self._frames[self._findex]\n return None",
"def _load_frame(self, i):\n\n eof = False \n try:\n self.im.seek(i)\n except EOFError:\n eof = True\n\n return eof",
"def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)",
"def filenum_index( # noqa: F811\n index: Optional[int], seq: \"FileSequence\"\n) -> Optional[int]:\n if index is None:\n return None\n if index < 0:\n return index\n else:\n index -= seq.start\n # if the requested index was a positive number, and adjusting for the start\n # number throws it into the negatives, then we know the request is out of\n # range of the sequence, so we need to raise an error here to avoid returning\n # a file from the end of the sequence because of the negative roll over.\n if index < 0:\n raise IndexError(\"Frame Out of range\")\n else:\n return index",
"def moveToNextFrame(self):\n\t\tall_ts = [s for t in self.stamps_by_stream.values() for s in t]\n\t\tall_ts.sort()\n\t\tfirst_frame = all_ts[0]\n\n\t\tselected_index = bisect.bisect_right(all_ts, self._timeline.current_pos)-1\n\t\tif len(all_ts)-1 == selected_index:\n\t\t\t# We are already at the last frame\n\t\t\treturn\n\n\t\t# Move forward enough to be sure to reach the first frame\n\t\tincrease = 1\n\t\twhile all_ts[selected_index+increase] < first_frame:\n\t\t\tincrease += 1\n\n\t\tself._timeline.current_pos = all_ts[selected_index+increase]\n\t\tself.objectSelected.emit(\n\t\t self.getFileAtStamp(self._timeline.current_pos)\n\t\t)",
"def next(self):\n if self.currentframe < (self.nframes - 1) and self.nframes > 1:\n return self.getframe(self.currentframe + 1)\n else:\n newobj = pixiimage()\n newobj.read(next_filename(\n self.sequencefilename))\n return newobj",
"def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._tell)\n\n self._fp_left.seek(loc)\n self._fp_right.seek(loc)\n self._tell = loc\n self._buf = Buffer()",
"def do_frame(self, arg):\n if not arg:\n # Just display the frame, without handling sticky.\n self.print_stack_entry(self.stack[self.curindex])\n return\n\n try:\n arg = int(arg)\n except (ValueError, TypeError):\n print(\n '*** Expected a number, got \"{0}\"'.format(arg), file=self.stdout)\n return\n if abs(arg) >= len(self.stack):\n print('*** Out of range', file=self.stdout)\n return\n if arg >= 0:\n self.curindex = arg\n else:\n self.curindex = len(self.stack) + arg\n self.curframe = self.stack[self.curindex][0]\n self.curframe_locals = self.curframe.f_locals\n self.print_current_stack_entry()\n self.lineno = None",
"def _seek(self, offset):\n assert offset % self.recordsize == 0\n file_number, file_offset = divmod(offset,\n self.filesize - self.header_size)\n self.open(file_number)\n self.fh_raw.seek(file_offset + self.header_size)\n self.offset = offset",
"def next(self):\n if self.currentframe < (self.nframes - 1) and self.nframes > 1:\n return self.getframe(self.currentframe + 1)\n else:\n newobj = hdf5image()\n newobj.read(next_filename(self.filename))\n return newobj",
"def loadFringe(self, frame=1, filename=None):\n self.currentFrame = np.int(frame)\n if filename:\n ofdPath = filename\n else:\n ofdPath = os.path.join(self.directory, self.basename +'.ofd')\n\n self.getOfdInfo(ofdPath)\n\n if self.mode == '2channel':\n self.load2Channel()\n if self.mode == '1channel':\n self.load1Channel()\n if self.mode == 'mmap2channel':\n self.mmap2channel()\n if self.mode == 'mmap1channel':\n self.mmap1channel()",
"def seqIo_crop(fname, tname, frames):\n if not isinstance(frames, np.ndarray): frames=np.array(frames)\n sr = seqIo_reader(fname)\n sw = seqIo_writer(tname,sr.header)\n pad,_= sr.getFrame(0)\n pad = np.zeros(pad.size).astype(np.uint8)\n kp = frames>=0 & frames<sr.header['numFrames']\n if not np.all(kp): frames = frames[kp]\n print('%i out of bounds frames'% np.sum(~kp))\n ordered = np.all(frames[1:]==frames[:-1]+1)\n n= frames.size\n k=0\n for f in frames:\n if f<0:\n sw.addFrame(pad)\n continue\n I,ts = sr.getFrame(f)\n k+=1\n if ordered:\n sw.addFrame(I,ts)\n else:\n sw.addFrame(I)\n sr.close()\n sw.close",
"def _next_frame(self):\n ret, self.frame = self.capture.read()\n if not ret:\n self.logger.warning('Failed to read frame')\n if self.show_video:\n cv2.imshow('frame', self.frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n exit(0)\n return ret",
"def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))",
"def next(self):\n frame = self.xyzFile.nextFrame()\n if frame is None: return None\n \n newFrame = XYZFrame()\n newFrame.boxVectors = self.lattice.boxVectors\n refFrame = XYZFrame()\n refFrame.boxVectors = self.lattice.boxVectors\n atomsLists = self.propagateAtomsThroughPbc(frame.atoms, frame.boxSize)\n \n allAtoms = concatenate(atomsLists) \n posCount = len(atomsLists[0])\n \n match, referenceMatch, errors = self.match(atomsLists) \n for atomIndex in range(posCount):\n newFrame.atoms.append(XYZAtom(atomsLists[0][atomIndex].symbol\n , *self.lattice.positions[match[atomIndex]].x0))\n \n for atomIndex in range(posCount):\n refFrame.atoms.append(XYZAtom(allAtoms[referenceMatch[atomIndex]].__repr__())) \n refFrame.atoms[-1].x += 15\n \n for atomIndex in range(len(allAtoms)):\n refFrame.atoms.append(XYZAtom(allAtoms[atomIndex].__repr__())) \n refFrame.atoms[-1].x += 30\n \n return ProjectedFrame(newFrame, refFrame, errors)",
"def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._fp.tell())\n\n self._fp.seek(loc)",
"def moveFrame(self, num, callback=None): \n if(self.isLoaded() == False):\n return\n self.isPlaying = False\n self.currentFrameNumber = int(self.video.get(cv2.CAP_PROP_POS_FRAMES))\n self.setFrame(self.currentFrameNumber + num - 1, callback)",
"def rewind(self, j=0):\n pass",
"def moveToPreviousFrame(self):\n\t\tall_ts = [s for t in self.stamps_by_stream.values() for s in t]\n\t\tall_ts.sort()\n\t\tfirst_frame = all_ts[0]\n\n\t\tselected_index = bisect.bisect_right(all_ts, self._timeline.current_pos)-1\n\t\tif selected_index <= 0 or all_ts[selected_index-1] < first_frame:\n\t\t\t# There is no data before, or no frame. Do nothing\n\t\t\treturn\n\t\tself._timeline.current_pos = all_ts[selected_index-1]\n\t\tself.objectSelected.emit(\n\t\t self.getFileAtStamp(self._timeline.current_pos)\n\t\t)",
"def move_next(self, step=1):\n if self._index is not None and len(self) > self._index + step:\n self._index += step\n # if index >= end index of current frame --> recalculate findex\n if self._index >= self._findex * self._flen + self._flen:\n self._findex += int(math.ceil(step / float(self._flen)))\n return self[self._index]\n return None",
"def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]",
"def start_frame(self):\n\n # Check whether we're supposed to make a frame on this iteration:\n if self.frame_count % self.stride != 0:\n return\n\n # Check whether we're already making a frame. \n if self.in_scope:\n print(\"The Gif object for {} has encountered 'start_frame' twice\\\n without an intervening 'end_frame'\".format(self.filename))\n raise SyntaxError\n\n # Construct a new figure\n fig = plt.figure(figsize=(self.width,self.height), **(self.kwargs))\n self.current_frame = fig\n\n # Set the \"in_scope\" member True\n self.in_scope = True\n\n return self.current_frame",
"def _seek(self, iteration):\n\n # Validate it\n if iteration < 1:\n iteration = 1\n\n # Seek to one iteration before the specified iteration, then run the\n # network for one iteration, so the inspectors will show the right data\n self.iteration = iteration - 1\n self.experiment.position.iter = iteration - 1\n for sensor in self.sensors:\n assert sensor.type == 'VectorFileSensor'\n sensor.setParameter('position', self.iteration)\n self._step()",
"def video_frame_by_frame(path, offset=0, frame_range=None, step=1, end=None):\n cap = cv2.VideoCapture(path)\n\n if frame_range:\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n duration = cap.get(cv2.CAP_PROP_FRAME_COUNT) / fps\n duration = int(duration)\n if end is None:\n end = duration\n start = int(offset)\n\n # Just yield very step frame and currect time.\n frame_range = (i for i in range(start, end, step))\n for fr in frame_range:\n # Set the correct frame number to read.\n cap.set(cv2.CAP_PROP_POS_FRAMES, fr)\n ret, frame = cap.read()\n if ret:\n yield frame, cap.get(cv2.CAP_PROP_POS_MSEC)\n else:\n yield None, cap.get(cv2.CAP_PROP_POS_MSEC)\n\n else:\n if offset:\n # Set the correct offset point so we\n # dont read shit we dont need.\n fps = cap.get(cv2.CAP_PROP_FPS)\n fn = offset * fps\n cap.set(cv2.CAP_PROP_POS_FRAMES, fn)\n\n while cap.isOpened():\n ret, frame = cap.read()\n pos = cap.get(cv2.CAP_PROP_POS_MSEC)\n\n if ret:\n yield frame, pos\n else:\n break\n\n if end and pos / 1000 > end:\n LOG.debug('Stopped reading the file because of %s' % end)\n break\n\n cap.release()",
"def next_batch(self, frame_skip_count=5):\n frame_count = 0\n frame_divisor = max(frame_skip_count + 1, 1)\n while True:\n ret, frame = self.cap.read()\n if ret:\n if frame_count % frame_divisor == 0:\n yield frame\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n else:\n break",
"def advance_frame():\n # pylint: disable=global-statement\n global current_frame, current_loop\n current_frame = current_frame + 1\n if current_frame >= frame_count:\n current_frame = 0\n current_loop = current_loop + 1\n sprite_group[0][0] = current_frame",
"def grab_next_frame(self):\n if Rescue_PI.input_video_file_path is None:\n self.orig_frame = self.vs.read()\n self.frame = self.orig_frame.copy()\n else:\n _, self.frame = self.vs.read()\n # self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)\n if self.frame is None:\n pass\n else:\n self.frame = imutils.resize(self.frame, width=frame_width_in_pixels)",
"def seek(self, position):\n if position < 0 or position >= self.tree_sequence.sequence_length:\n raise ValueError(\"Position out of bounds\")\n self._ll_tree.seek(position)"
] |
[
"0.581889",
"0.57004464",
"0.5665532",
"0.5629413",
"0.56082237",
"0.5516672",
"0.54625636",
"0.5416819",
"0.53481257",
"0.5330396",
"0.53217256",
"0.5236598",
"0.522815",
"0.5198737",
"0.5169777",
"0.51654124",
"0.51011217",
"0.5089388",
"0.5058896",
"0.5026361",
"0.50190157",
"0.49701354",
"0.49626288",
"0.4957303",
"0.49322975",
"0.49311402",
"0.49256465",
"0.4902985",
"0.4901985",
"0.48868737"
] |
0.67903125
|
0
|
Make this image into a thumbnail. This method modifies the image to contain a thumbnail version of itself, no larger than the given size. This method calculates an appropriate thumbnail size to preserve the aspect of the image, calls the draft() method to configure the file reader (where applicable), and finally resizes the image.
|
def thumbnail(self, size, resample=BICUBIC):
# preserve aspect ratio
x, y = self.size
if x > size[0]:
y = int(max(y * size[0] / x, 1))
x = int(size[0])
if y > size[1]:
x = int(max(x * size[1] / y, 1))
y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
self._instance = self.resize(size, resample, image=self._instance)
self.readonly = 0
self.pyaccess = None
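The aspect-preserving size calculation can be checked in isolation; the helper name fit_within and the example sizes below are hypothetical:

def fit_within(size, bound):
    x, y = size
    if x > bound[0]:
        y = int(max(y * bound[0] / x, 1))
        x = int(bound[0])
    if y > bound[1]:
        x = int(max(x * bound[1] / y, 1))
        y = int(bound[1])
    return x, y

assert fit_within((1000, 500), (128, 128)) == (128, 64)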
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def resize_image(image, size=(926, 617)):\n\n im = Image.open(image)\n im.convert('RGB')\n im.thumbnail(size)\n thumb_io = BytesIO()\n im.save(thumb_io, 'JPEG', quality=85)\n thumbnail = File(thumb_io, name=image.name)\n return thumbnail",
"def img_resize(infile, size):\n try:\n infile.thumbnail(size, Image.ANTIALIAS)\n except:\n print(\"cannot create thumbnail for '%s'\" % infile)\n return infile",
"def make_thumbnail(image, size=(100, 100)):\n logging.debug(image)\n\n im = create_colorblind_image(image)\n\n thumb_io = BytesIO() # create a BytesIO object\n\n im.save(thumb_io, 'PNG', quality=85) # save image to BytesIO object\n\n thumbnail = File(thumb_io, name=image.name) # create a django friendly File object\n\n return thumbnail",
"def resize(img):\n size = (500, 500)\n img.thumbnail(size)\n return img",
"def thumbnail(im, config):\n\n im.thumbnail(\n (config['width'], config['height']),\n ANTIALIAS,\n )\n\n return im",
"def make_thumbnail(filepath):\n img = Image.open(filepath)\n thumb = None\n w, h = img.size\n\n # if it is exactly 128x128, do nothing\n if w == 128 and h == 128:\n return True\n\n # if the width and height are equal, scale down\n if w == h:\n thumb = img.resize((128, 128), Image.BICUBIC)\n thumb.save(filepath)\n return True\n\n # when the image's width is smaller than the height\n if w < h:\n # scale so that the width is 128\n ratio = w / 128.\n w_new, h_new = 128, int(h / ratio)\n thumb = img.resize((w_new, h_new), Image.BICUBIC)\n\n # crop the excess\n top, bottom = 0, 0\n margin = h_new - 128\n top, bottom = margin // 2, 128 + margin // 2\n box = (0, top, 128, bottom)\n cropped = thumb.crop(box)\n cropped.save(filepath)\n return True\n\n # when the image's height is smaller than the width\n if h < w:\n # scale so that the height is 128\n ratio = h / 128.\n w_new, h_new = int(w / ratio), 128\n thumb = img.resize((w_new, h_new), Image.BICUBIC)\n\n # crop the excess\n left, right = 0, 0\n margin = w_new - 128\n left, right = margin // 2, 128 + margin // 2\n box = (left, 0, right, 128)\n cropped = thumb.crop(box)\n cropped.save(filepath)\n return True\n return False",
"def _resize_image(filename, size):\n width, height = 0, 1\n\n try:\n import Image, ImageOps\n except ImportError:\n from PIL import Image, ImageOps\n\n if not size['resample']:\n resample = Image.ANTIALIAS\n\n img = Image.open(filename)\n if (img.size[width] > size['width'] or\n img.size[height] > size['height']):\n\n #If the image is big resize it with the cheapest resize algorithm\n factor = 1\n while (img.size[0] / factor > 2 * size['width'] and\n img.size[1] * 2 / factor > 2 * size['height']):\n factor *= 2\n if factor > 1:\n img.thumbnail((int(img.size[0] / factor),\n int(img.size[1] / factor)), resample=resample)\n\n if size['crop']:\n img = ImageOps.fit(img, (size['width'], size['height']), method=resample)\n else:\n img.thumbnail((size['width'], size['height']), resample=resample)\n\n try:\n img.save(filename, optimize=1)\n except IOError:\n img.save(filename)",
"def resize(self, size):\n return Image(self.pil_image.resize(size, PIL.Image.ANTIALIAS))",
"def make_thumbnail(self):\n # https://gist.github.com/valberg/2429288\n\n # make sure image data is set\n if not self.image_data:\n return False\n\n if self.proxy_data:\n return True\n\n # Create a resized version of the image\n image = Image.open(self.image_data)\n image.thumbnail(THUMBNAIL_SIZE, Image.BICUBIC)\n\n # Save the thumbnail to in-memory 'file'\n temp_thumb = BytesIO()\n image.save(temp_thumb, 'jpeg')\n temp_thumb.seek(0) # rewinds the file\n\n # Save image to a SimpleUploadFile which can be saved\n # into ImageField\n # TODO figure out how to pass base image's UUID before\n # image is committed to DB\n basename = os.path.basename(self.image_data.name)\n uuidname = os.path.splitext(basename)[0]\n suf = SimpleUploadedFile(uuidname,\n temp_thumb.read(), content_type='image/jpeg')\n thumb_filename = '{}_thumb.jpeg'.format(suf.name)\n\n # set save=False, or else it will infinite loop\n self.proxy_data.save(thumb_filename,\n suf,\n save=False)\n\n # Also store the real dimensions for the Pillow thumbnail\n self.proxy_width, self.proxy_height = image.size\n\n temp_thumb.close()\n\n return True",
"def bigThumbnail(self):\n\t\tfileCount = len(self.fileList)\n\t\tthumbSize = (200, 200)\n\t\timgHoriz = int(self.get_screen().get_width() / (thumbSize[1] + 20))\n\t\timgSize = (self.get_screen().get_width(), (thumbSize[1] + 20) * (int(fileCount / imgHoriz) + 2))\n\n\t\tpixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, imgSize[0], imgSize[1])\n\t\tfor file in range(len(self.fileList)):\n\t\t\ttry:\n\t\t\t\ttimg = gtk.gdk.pixbuf_new_from_file(self.fileList[file])\n\t\t\texcept:\n\t\t\t\tprint >> sys.stderr, \"Failed to load image %s\" % self.fileList[file]\n\t\t\t\tcontinue\n\t\t\ttimgSize = [timg.get_width(), timg.get_height()]\n\t\t\tif timgSize[0] > thumbSize[0] or timgSize[1] > thumbSize[1]:\n\t\t\t\tscaleFactor = 1.0 * thumbSize[0] / timgSize[0]\n\t\t\t\tif timgSize[1] * scaleFactor > thumbSize[1]:\n\t\t\t\t\tscaleFactor = 1.0 * thumbSize[1] / timgSize[1]\n\t\t\t\tself.scaleFactor = scaleFactor\n\t\t\t\ttimgSize[0] = int(timgSize[0] * scaleFactor)\n\t\t\t\ttimgSize[1] = int(timgSize[1] * scaleFactor)\n\t\t\t\ttimg = timg.scale_simple(timgSize[0], timgSize[1], gtk.gdk.INTERP_BILINEAR)\n\t\t\tpos = ( (file % imgHoriz) * (thumbSize[0] + 20) + 10 + (thumbSize[0] - timgSize[0]) / 2,\n\t\t\t\tint(file / imgHoriz) * (thumbSize[1] + 20) + 10)\n\n\t\t\tprint \" Rendering thumbnails; %d of %d\\r\" % (file, len(self.fileList)),\n\t\t\tsys.stdout.flush()\n\n\t\t\ttimg.copy_area(0, 0, timgSize[0], timgSize[1], pixbuf, pos[0], pos[1])\n\t\t\tdel timg\n\t\t\tgc.collect()\n\t\tprint\n\t\tself.currentPixbuf = pixbuf\n\t\tself.fileList = [ \"#\" ]\n\t\tself.fileName = \"#\"\n\t\tself.autoScale()\n\t\tself.display()",
"def generateThumbnail(img):\n\n if not img._thumbfn:\n return\n\n aimgfn = join(opts.root, img._filename)\n if not opts.fast:\n img._size = imageSize(aimgfn)\n\n athumbfn = join(opts.root, img._thumbfn)\n\n if opts.thumb_force:\n if opts.quiet: print \"forced regeneration of '%s'\" % img._thumbfn\n elif not exists(athumbfn):\n if opts.quiet: print \"thumbnail absent '%s'\" % img._thumbfn\n else:\n # Check if thumbsize has changed\n if not opts.fast:\n img._thumbsize = imageSize(athumbfn)\n if not checkThumbSize(img._size, \\\n img._thumbsize, \\\n opts.thumb_size):\n if opts.quiet: print \"thumbnail '%s size has changed\" % img._thumbfn\n try:\n # Clear cache for thumbnail size.\n del imageSizeCache[ athumbfn ]\n except:\n pass\n else:\n# pass\n# if opts.quiet: print \"thumbnail '%s' already generated (size ok)\" \\\n# % img._thumbfn\n return\n else:\n if opts.quiet: print \"thumbnail '%s' already generated\" % img._thumbfn\n return\n\n if opts.no_magick:\n if opts.quiet: print \"ImageMagick tools disabled, can't create thumbnail\"\n return\n\n # create necessary directories\n d = dirname(athumbfn)\n if not exists(d):\n os.makedirs(d)\n\n if opts.pil:\n\n try:\n im = PilImage.open(aimgfn)\n im.thumbnail((opts.thumb_size, opts.thumb_size), config.Thumbnails[\"Interpolation\"])\n im.save(athumbfn)\n\n img._thumbsize = im.size\n except IOError, e:\n raise SystemExit(\\\n \"Error: identifying file '%s'\" % aimgfn + str(e))\n\n else:\n\n cmd = getMagickProg('convert') + ' -border 2x2 '\n # FIXME check if this is a problem if not specified\n #cmd += '-interlace NONE '\n\n cmd += '-geometry %dx%d ' % (opts.thumb_size, opts.thumb_size)\n\n if opts.thumb_quality:\n cmd += '-quality %d ' % opts.thumb_quality\n\n # This doesn't add text into the picture itself, just the comment in\n # the header.\n if opts.copyright:\n cmd += '-comment \\\"%s\\\" ' % opts.copyright\n\n # We use [1] to extract the thumbnail when there is one.\n # It is harmless otherwise.\n subimg = \"\"\n if img._ext.lower() in [ \".jpg\", \".tif\", \".tiff\" ]:\n subimg = \"[1]\"\n\n cmd += '\"%s%s\" \"%s\"' % (aimgfn, subimg, athumbfn)\n\n if opts.quiet: print \"generating thumbnail '%s'\" % img._thumbfn\n\n (chin, chout, cherr) = os.popen3(cmd)\n errs = cherr.readlines()\n chout.close()\n cherr.close()\n if errs:\n print >> sys.stderr, \\\n \"Error: running convert program on %s:\" % aimgfn\n errs = string.join(errs, '\\n')\n print errs\n\n if subimg and \\\n re.compile('Unable to read subimage').search(errs):\n if opts.quiet: print \"retrying without subimage\"\n cmd = string.replace(cmd, subimg, \"\")\n\n (chin, chout, cherr) = os.popen3(cmd)\n errs = cherr.readlines()\n chout.close()\n cherr.close()\n if errs:\n print >> sys.stderr, \\\n \"Error: running convert program on %s:\" % aimgfn\n print string.join(errs, '\\n')\n\n else:\n img._thumbsize = imageSize(athumbfn)",
"def get_thumbnail(self, size):\n\n thumb = self.associated_images[b'thumbnail']\n return thumb",
"def create_thumb(source_fame, target_fame, target_w = 260, target_h=205):\r\n size = target_w, target_h\r\n im = Image.open(source_fame)\r\n width = im.size[0]\r\n height = im.size[1]\r\n newwidth = int(size[0])\r\n newheight = int(height*(newwidth/float(width)))\r\n if newheight > int(size[1]):\r\n newheight = int(size[1])\r\n newwidth = int(width*(newheight/float(height)))\r\n size = newwidth, newheight\r\n # Resize and save the image\r\n im.thumbnail(size, Image.ANTIALIAS)\r\n im.save(target_fame)",
"def thumbnail(self):\n\n if self._thumbnail is None:\n cover = self.cover()\n\n if cover is not None:\n self._thumbnail = cover.resize(THUMBNAIL_SIZE, Image.ANTIALIAS)\n\n return self._thumbnail",
"def thumbnail(self, fnameIn, fnameOut):\n cmd = \"convert -define jpeg:size=500x150 \"\n cmd += '\"%s\" ' % os.path.join(self.downloadFolder, fnameIn)\n cmd += \"-auto-orient -thumbnail 250x150 \"\n cmd += '\"%s\" ' % os.path.join(self.thumbnailFolder, fnameOut)\n self.log(\"creating thumbnail ...\")\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()",
"def thumbnail_image(filename, size=(64, 64), format='.png'):\n\n try:\n im = Image.open(filename)\n im.thumbnail(size, Image.ANTIALIAS)\n\n basename = os.path.basename(filename)\n thumb_filename = os.path.join('thumbs', f'{basename.rsplit(\".\")[0]}_thumb.png')\n im.save(thumb_filename)\n print('Saved', thumb_filename)\n return True\n\n except Exception as e:\n print('Error converting file', filename)\n raise",
"def write_thumbnail(image_name, size):\n # TODO : use something else instead of image.thumbnail\n sizes = {\n 'small' : [30,40],\n 'medium' : [70,70],\n 'large' : [120,120]\n }\n image = Image.open(f'{WRITE_FOLDER}/{USER_NAME}/original/{image_name}')\n image.thumbnail((sizes[size][0], sizes[size][1]))\n image.save(f'{WRITE_FOLDER}/{USER_NAME}/{size}/{image_name}')",
"def resize_image(filename, out=None, thumbnail=False):\n\n if thumbnail:\n image_size = frappe.db.get_value(\n 'eBay Manager Settings', filters=None,\n fieldname='ebay_thumbnail_size')\n else:\n image_size = frappe.db.get_value(\n 'eBay Manager Settings', filters=None,\n fieldname='ebay_image_size')\n image_size = int(image_size)\n\n if image_size < 1:\n frappe.throw('Invalid image size: ' + str(image_size))\n\n size_string = '{}x{}>'.format(image_size, image_size)\n if out is not None:\n subprocess.call(\n ['convert', '-auto-orient', '-resize', size_string,\n filename, out])\n else:\n subprocess.call(\n ['mogrify', '-auto-orient', '-resize', size_string, filename])",
"def resize_img(self, filename: str, size: Tuple[int, int] = (299, 299)):\n img = Image.open(join(self.source_dir, filename))\n width, height = img.size\n orig_shape = np.array(img.size)\n wanted_shape = np.array(size)\n ratios = wanted_shape / orig_shape\n wanted_width, wanted_height = size\n ratio_w, ratio_h = wanted_width / width, wanted_height / height\n\n if np.alltrue(ratios > 1):\n # Both sides of the image are shorter than the desired dimension,\n # so take the side that's closer in size and enlarge the image\n # in both directions to make that one fit\n factor = min(ratio_h, ratio_w)\n img = img.resize((int(width * factor), int(height * factor)))\n\n # Now we have an image that's either larger than the desired shape\n # or at least one side matches the desired shape and we can resize\n # with contain\n cover = resizeimage.resize_contain(img, size)\n cover.save(join(self.dest_dir, filename), 'JPEG')",
"def setThumbnailImage(*args):",
"def thumbnail(self, options):\n params = {\n 'width': options['width'] if 'width' in options else 50,\n 'height': options['height'] if 'height' in options else 50,\n 'smartCropping': options['smartCropping'] if 'smartCropping' in options else False\n }\n\n return Base._postWithOptions(self, _thumbnailUrl, options, params)",
"def resize_profile_pic(sender, instance, **kwargs):\n profile_pic = instance.profile_picture\n if profile_pic.name != \"default.png\":\n img = Image.open(profile_pic.path)\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(profile_pic.path)",
"def scale_image(image: Image, scale: float) -> Image:\n width = round(image.width * scale)\n height = round(image.height * scale)\n image.thumbnail((width, height))\n return image",
"def savethumb(image, fname, outpath, size=(128,128), preserve_name=False):\n if not preserve_name:\n fpath = genSavePath(outpath, fname, modstring=f\"thumbnail_{size[0]}_{size[1]}\")\n else:\n fpath = genSavePath(outpath, fname)\n im = copy(image)\n im.thumbnail(size)\n try:\n im.save(fpath, subsample=\"keep\", qtables=image.quantization, optimize=True)\n\n except IOError as m:\n print( \"Thumbnail({}) image creation failed for: {}. \\nReason:{}\".format(size,fname,m))",
"def GetThumbnail(self, type, maxsize): # real signature unknown; restored from __doc__\n pass",
"def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible(\"image\"):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get(\"image\", None)\n if image_conf:\n scaleconf = image_conf[\"imgsize\"]\n # scale string is something like: 'mini 200:200' and\n # we need the name only: 'mini'\n if scaleconf == \"_original\":\n scale = None\n else:\n scale = scaleconf.split(\" \")[0]\n scales = item.restrictedTraverse(\"@@images\")\n return scales.scale(\"image\", scale)",
"def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible('image'):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get('image', None)\n if image_conf:\n scaleconf = image_conf['imgsize']\n # Scale string is something like: 'mini 200:200'.\n # We need the name only: 'mini'.\n scale = scaleconf.split(' ')[0]\n scales = ploneapi.content.get(path='@@images')\n return scales.scale('image', scale)",
"def generate_thumb(origin, size, fn):\n assert isinstance(size, int), 'Integers are expected'\n img = Image.open(origin)\n path = os.path.dirname(origin)\n\n new_img = img.resize((size, size), Image.ANTIALIAS)\n thumb_path = os.path.join(path, fn)\n new_img.save(thumb_path)\n return thumb_path",
"def generateScaled(img):\n\n if not img._scaledfn:\n return\n\n aimgfn = join(opts.root, img._filename)\n if not opts.fast:\n img._size = imageSize(aimgfn)\n\n ascaledfn = join(opts.root, img._scaledfn)\n\n if opts.scaled_force:\n if opts.quiet: print \"forced regeneration of '%s'\" % img._scaledfn\n elif not exists(ascaledfn):\n if opts.quiet: print \"thumbnail absent '%s'\" % img._scaledfn\n else:\n # Check if scaledsize has changed\n if not opts.fast:\n img._scaledsize = imageSize(ascaledfn)\n if not checkThumbSize(img._size, \\\n img._scaledsize, \\\n opts.scaled_size):\n if opts.quiet: print \"thumbnail '%s size has changed\" % img._scaledfn\n try:\n # Clear cache for thumbnail size.\n del imageSizeCache[ ascaledfn ]\n except:\n pass\n else:\n if opts.quiet: print \"thumbnail '%s' already generated (size ok)\" \\\n % img._scaledfn\n return\n else:\n if opts.quiet: print \"thumbnail '%s' already generated\" % img._scaledfn\n return\n\n if opts.no_magick:\n if opts.quiet: print \"ImageMagick tools disabled, can't create thumbnail\"\n return\n\n # create necessary directories\n d = dirname(ascaledfn)\n if not exists(d):\n os.makedirs(d)\n\n if opts.pil:\n\n try:\n im = PilImage.open(aimgfn)\n im.thumbnail((opts.scaled_size, opts.scaled_size), config.ScaledImages[\"Interpolation\"])\n im.save(ascaledfn)\n\n img._scaledsize = im.size\n except IOError, e:\n raise SystemExit(\\\n \"Error: identifying file '%s'\" % aimgfn + str(e))\n\n else:\n\n cmd = getMagickProg('convert') + ' -border 2x2 '\n # FIXME check if this is a problem if not specified\n #cmd += '-interlace NONE '\n\n cmd += '-geometry %dx%d ' % (opts.scaled_size, opts.scaled_size)\n\n if opts.scaled_quality:\n cmd += '-quality %d ' % opts.scaled_quality\n\n # This doesn't add text into the picture itself, just the comment in\n # the header.\n if opts.copyright:\n cmd += '-comment \\\"%s\\\" ' % opts.copyright\n\n # We use [1] to extract the thumbnail when there is one.\n # It is harmless otherwise.\n subimg = \"\"\n if img._ext.lower() in [ \".jpg\", \".tif\", \".tiff\" ]:\n subimg = \"[1]\"\n\n cmd += '\"%s%s\" \"%s\"' % (aimgfn, subimg, ascaledfn)\n\n if opts.quiet: print \"generating thumbnail '%s'\" % img._scaledfn\n\n (chin, chout, cherr) = os.popen3(cmd)\n errs = cherr.readlines()\n chout.close()\n cherr.close()\n if errs:\n print >> sys.stderr, \\\n \"Error: running convert program on %s:\" % aimgfn\n errs = string.join(errs, '\\n')\n print errs\n\n if subimg and \\\n re.compile('Unable to read subimage').search(errs):\n if opts.quiet: print \"retrying without subimage\"\n cmd = string.replace(cmd, subimg, \"\")\n\n (chin, chout, cherr) = os.popen3(cmd)\n errs = cherr.readlines()\n chout.close()\n cherr.close()\n if errs:\n print >> sys.stderr, \\\n \"Error: running convert program on %s:\" % aimgfn\n print string.join(errs, '\\n')\n\n else:\n img._scaledsize = imageSize(ascaledfn)",
"def resize_image(self, filename, size=(299,299,3)):\n path = join(self.source_dir, filename)\n img = Image.open(path)\n img = img.resize(size)\n img.save(join(self.dest_dir, filename), 'JPEG', optimize=True)"
] |
[
"0.7389323",
"0.72575986",
"0.7197877",
"0.71460813",
"0.69607085",
"0.6918584",
"0.6914563",
"0.6866507",
"0.6844879",
"0.68147904",
"0.67465633",
"0.6732139",
"0.6689216",
"0.66817874",
"0.6676919",
"0.664664",
"0.65712076",
"0.6569657",
"0.6549778",
"0.64706296",
"0.6459728",
"0.6432724",
"0.64288604",
"0.6428491",
"0.6400077",
"0.6365956",
"0.6356335",
"0.6344372",
"0.63354856",
"0.6298475"
] |
0.7940607
|
0
|
calculate the point (x, y) for a given angle ang on an ellipse with its center at (centerx, centery), its horizontal radius radiush, and its vertical radius radiusv
|
def _get_pointFromEllipseAngle(self, centerx, centery, radiush, radiusv, ang):
th = np.radians(ang)
ratio = (radiush/2.0)/float(radiusv/2.0)
x = centerx + radiush/2.0 * np.cos(th)
y = centery + radiusv/2.0 * np.sin(th)
return int(x), int(y)
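A quick standalone check of the same formula with arbitrary example values (centerx=100, centery=50, radiush=40, radiusv=20, ang=0 are assumptions): at ang=0 the point lies on the horizontal axis, offset by radiush/2 from the center.

import numpy as np

centerx, centery, radiush, radiusv, ang = 100, 50, 40, 20, 0
th = np.radians(ang)
x = centerx + radiush / 2.0 * np.cos(th)   # 100 + 20 * 1.0 = 120.0
y = centery + radiusv / 2.0 * np.sin(th)   # 50 + 10 * 0.0 = 50.0
assert (int(x), int(y)) == (120, 50)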
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ellipse(self):\n f = self.img\n x = self.x\n y = self.y\n x2 = self.x2\n y2 = self.y2\n xy = self.xy\n self.a2 = (x2+y2) + sqrt(((x2-y2)/2.)**2 + xy**2)\n self.b2 = (x2+y2) - sqrt(((x2-y2)/2.)**2 + xy**2)\n self.a = sqrt(self.a2)\n self.b = sqrt(self.b2)\n tan2theta = 2* (xy/(x2-y2))\n self.theta = arctan(tan2theta)/2.\n denominator = sqrt(((x2-y2)/2)**2+xy**2)\n self.cxx = y2/denominator\n self.cyy = x2/denominator\n self.cxy = -2*xy/denominator",
"def ellipse_pt(th, x_c, y_c, a, b, rot):\n x = x_c + (a * cos(th) * cos(rot) - b * sin(th) * sin(rot))\n y = y_c + (a * cos(th) * sin(rot) - b * sin(th) * cos(rot))\n return x, y",
"def ellipse(x,y,a,b):\n return ((x/float(a))**2 + (y/float(b))**2)",
"def get_points_on_ellipse(a, b, numPoints, startAngle = 0, verbose = False, increment = 0.01):\n def distance(x1,y1,x2,y2):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)\n x0 = a\n y0 = 0\n angle = 0\n d = 0\n while(angle <= 360):\n x = a * np.cos(np.radians(angle))\n y = b * np.sin(np.radians(angle))\n d += distance(x0,y0,x,y)\n x0 = x\n y0 = y\n angle += increment\n if verbose:\n print(\"The estimated circumference of ellipse is {:f}\".format(d))\n points = []\n arcLength = d/numPoints\n angle = 0\n x0 = a\n y0 = 0\n angle0 = 0\n while(angle0 < startAngle):\n angle += increment\n x = a * np.cos(np.radians(angle))\n y = b * np.sin(np.radians(angle))\n x0 = x\n y0 = y\n angle0 = angle\n for i in range(numPoints):\n dist = 0\n while(dist < arcLength):\n angle += increment\n x = a * np.cos(np.radians(angle))\n y = b * np.sin(np.radians(angle))\n dist += distance(x0,y0,x,y)\n x0 = x\n y0 = y\n if verbose:\n print(\n \"{} : angle = {:.2f}\\tdifference = {:.2f}\\tDistance {:.2f}\"\n .format(i+1,angle, angle-angle0,dist))\n points.append([x0, y0])\n angle0 = angle\n return np.array(points)",
"def generate_ellipse(R1,R2,center,theta,N=100):\r\n t = np.linspace(0.0,2.0*np.pi,N)\r\n x = R1*np.cos(t)*np.cos(theta) - R2*np.sin(t)*np.sin(theta) + center[0]\r\n y = R1*np.cos(t)*np.sin(theta) + R2*np.sin(t)*np.cos(theta) + center[1]\r\n return x,y",
"def drawEllipse(img, center, axes, angle, startAngle=0, endAngle=360, color = (0,0,255), fill = -1):\n\tcv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, fill)",
"def polar_coord(point, center):\n x = point[0] - center[0]\n y = point[1] - center[1]\n rho = np.sqrt(x ** 2 + y ** 2)\n phi = np.arctan2(y, x)\n return np.array([phi, rho])",
"def r_ellipse(self,xc=None,yc=None):\n x = self.x\n y = self.y\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.rel = sqrt(self.cxx*(x-xc)**2 +\n\t\t self.cyy*(y-yc)**2 +\n\t\t self.cxy*(x-xc)*(y-yc)\n\t\t )",
"def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):\n pts = np.zeros((360*k+1, 2))\n\n beta = -angle * np.pi/180.0\n sin_beta = np.sin(beta)\n cos_beta = np.cos(beta)\n alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])\n \n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n \n pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)\n pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)\n\n return pts",
"def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):\n pts = np.zeros((360*k+1, 2))\n\n beta = -angle * np.pi/180.0\n sin_beta = np.sin(beta)\n cos_beta = np.cos(beta)\n alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])\n\n sin_alpha = np.sin(alpha)\n cos_alpha = np.cos(alpha)\n\n pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)\n pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)\n\n return pts",
"def cartesian_to_ellipse(center, angle, lengths):\n xInd, yInd = np.mgrid[:512, :512]\n major = max(lengths)/np.mean(lengths)\n minor = min(lengths)/np.mean(lengths)\n xInd, yInd = xInd - center[0], yInd - center[1]\n xInd, yInd = rotate(xInd, yInd, angle=-angle)\n xInd, yInd = xInd*minor, yInd*major\n xInd, yInd = rotate(xInd, yInd, angle=angle)\n return xInd, yInd",
"def ellipse(self, x0, y0, a, b, n, ax=None, **kwargs):\n ax = kwargs.pop('ax', None) or self._check_ax()\n g = pyproj.Geod(a=self.rmajor, b=self.rminor)\n # Gets forward and back azimuths, plus distances between initial\n # points (x0, y0)\n azf, azb, dist = g.inv([x0, x0], [y0, y0], [x0+a, x0], [y0, y0+b])\n tsid = dist[0] * dist[1] # a * b\n\n # Initializes list of segments, calculates \\del azimuth, and goes on\n # for every vertex\n seg = [self(x0+a, y0)]\n AZ = numpy.linspace(azf[0], 360. + azf[0], n)\n for i, az in enumerate(AZ):\n # Skips segments along equator (Geod can't handle equatorial arcs).\n if numpy.allclose(0., y0) and (numpy.allclose(90., az) or\n numpy.allclose(270., az)):\n continue\n\n # In polar coordinates, with the origin at the center of the\n # ellipse and with the angular coordinate ``az`` measured from the\n # major axis, the ellipse's equation is [1]:\n #\n # a * b\n # r(az) = ------------------------------------------\n # ((b * cos(az))**2 + (a * sin(az))**2)**0.5\n #\n # Azymuth angle in radial coordinates and corrected for reference\n # angle.\n azr = 2. * numpy.pi / 360. * (az + 90.)\n A = dist[0] * numpy.sin(azr)\n B = dist[1] * numpy.cos(azr)\n r = tsid / (B**2. + A**2.)**0.5\n lon, lat, azb = g.fwd(x0, y0, az, r)\n x, y = self(lon, lat)\n\n # Add segment if it is in the map projection region.\n if x < 1e20 and y < 1e20:\n seg.append((x, y))\n\n poly = Polygon(seg, **kwargs)\n ax.add_patch(poly)\n\n # Set axes limits to fit map region.\n self.set_axes_limits(ax=ax)\n\n return poly",
"def ellipse(self, x, y, radiusx, radiusy, rotation=0, startangle=0, endangle=2 * pi, anticlockwise=False):\n self._impl.ellipse(x, y, radiusx, radiusy, rotation, startangle, endangle, anticlockwise)",
"def create_ellipse(self, ratio):\n circ = Point(self.center).buffer(1.0)\n ell = affinity.scale(circ, float(\n self.lengths[0]*ratio), float(self.lengths[1]*ratio))\n ellr = affinity.rotate(ell, self.angle)\n return ellr",
"def getEllipse(self, xc, Sigma, nSigma=2):\n\n if nla.det(Sigma) == 0:\n return None\n\n w, v = nla.eig(Sigma)\n D = np.diag(w, 0)\n\n theta = np.linspace(0, 2*np.pi, 100, endpoint=True)\n circle = nSigma*np.vstack((np.cos(theta), np.sin(theta)))\n\n el = sla.sqrtm(D)\n el = el.dot(circle)\n el = v.dot(el)\n\n XY = xc + el\n\n return XY",
"def circle(center, perp_vect, radius, element_number=10):\n # tl = [0, 0.2, 0.4, 0.6, 0.8]\n tl = np.linspace(0, 1, element_number)\n\n # vector form center to edge of circle\n # u is a unit vector from the centre of the circle to any point on the\n # circumference\n\n # normalized perpendicular vector\n n = perp_vect / np.linalg.norm(perp_vect)\n\n # normalized vector from the centre to point on the circumference\n u = perpendicular_vector(n)\n u /= np.linalg.norm(u)\n\n pts = []\n\n for t in tl:\n # u = np.array([0, 1, 0])\n # n = np.array([1, 0, 0])\n pt = (\n radius * np.cos(t * 2 * np.pi) * u\n + radius * np.sin(t * 2 * np.pi) * np.cross(u, n)\n + center\n )\n\n pt = pt.tolist()\n pts.append(pt)\n\n return pts",
"def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2",
"def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])",
"def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)",
"def fit_ellipse(x,y):\r\n \r\n def fit(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = np.linalg.eig(np.dot(np.linalg.inv(S), C))\r\n n = np.argmax(np.abs(E))\r\n a = V[:,n]\r\n return a\r\n \r\n def ellipse_center(a):\r\n b,c,d,f,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[0]\r\n num = b*b-a*c\r\n x0=(c*d-b*f)/num\r\n y0=(a*f-b*d)/num\r\n return np.array([x0,y0])\r\n \r\n def ellipse_angle_of_rotation(a):\r\n b,c,a = a[1]/2, a[2], a[0]\r\n return 0.5*np.arctan(2*b/(a-c))\r\n \r\n def ellipse_axis_length(a):\r\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\r\n up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\r\n down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\r\n res1=np.sqrt(up/down1)\r\n res2=np.sqrt(up/down2)\r\n return np.array([res1, res2])\r\n \r\n a = fit(x,y)\r\n center = ellipse_center(a)\r\n theta = ellipse_angle_of_rotation(a)\r\n [R1,R2] = ellipse_axis_length(a)\r\n\r\n return R1, R2, center, theta",
"def area_ellipse(radius_x: float, radius_y: float) -> float:\r\n if radius_x < 0 or radius_y < 0:\r\n raise ValueError(\"area_ellipse() only accepts non-negative values\")\r\n return pi * radius_x * radius_y",
"def _gen_ellipse(twiss, ep=1, num=100):\n a, b, c = twiss\n\n t = np.linspace(0, 2 * np.pi, num)\n t0 = np.arctan(a)\n x = np.sqrt(b * ep) * np.cos(t)\n y = np.sqrt(c * ep) * np.sin(t - t0)\n\n return np.vstack([x, y])",
"def get_x_y_from_center(center, angle):\n print \"center\", center\n size_of_img = (640, 480)\n alpha_x = angle + (center[1] - 0.5 * size_of_img[1]) * camera_y_angle / size_of_img[1] \n alpha_y = (center[0] - 0.5 * size_of_img[0]) * camera_x_angle / size_of_img[0] \n print \"angle y :\", alpha_y\n delta_x = height / math.tan(math.radians(alpha_x))\n d = math.sqrt(delta_x ** 2 + height ** 2)\n delta_y = d * math.sin(math.radians(alpha_y))\n return round(delta_x), round(delta_y)",
"def tilted_ellipse(s, pos1, pos2, size_x, size_y, color, angle):\n surface = pygame.Surface((150, 150), pygame.SRCALPHA, 32).convert_alpha()\n ellipse(surface, color, (0, 0, size_x, size_y))\n surface2 = pygame.transform.rotate(surface, angle)\n return s.blit(surface2, (pos1, pos2))",
"def getAfinityCenter(width, height, point, center, radius=7, img_affinity=None):\n tensor = torch.zeros(2, height, width).float()\n\n # Create the canvas for the affinity output\n imgAffinity = Image.new(\"RGB\", (width, height), \"black\")\n totensor = transforms.Compose([transforms.ToTensor()])\n\n draw = ImageDraw.Draw(imgAffinity)\n r1 = radius\n p = point\n draw.ellipse((p[0] - r1, p[1] - r1, p[0] + r1, p[1] + r1), (255, 255, 255))\n\n del draw\n\n # Compute the array to add the affinity\n array = (np.array(imgAffinity) / 255)[:, :, 0]\n\n angle_vector = np.array(center) - np.array(point)\n angle_vector = normalize(angle_vector)\n affinity = np.concatenate([[array * angle_vector[0]], [array * angle_vector[1]]])\n\n # print (tensor)\n if not img_affinity is None:\n # Find the angle vector\n # print (angle_vector)\n if length(angle_vector) > 0:\n angle = py_ang(angle_vector)\n else:\n angle = 0\n # print(angle)\n c = np.array(colorsys.hsv_to_rgb(angle / 360, 1, 1)) * 255\n draw = ImageDraw.Draw(img_affinity)\n draw.ellipse((p[0] - r1, p[1] - r1, p[0] + r1, p[1] + r1), fill=(int(c[0]), int(c[1]), int(c[2])))\n del draw\n re = torch.from_numpy(affinity).float() + tensor\n return re, img_affinity",
"def getAfinityCenter(width, height, point, center, radius=7, img_affinity=None):\n tensor = torch.zeros(2,height,width).float()\n\n # Create the canvas for the afinity output\n imgAffinity = Image.new(\"RGB\", (width,height), \"black\")\n totensor = transforms.Compose([transforms.ToTensor()])\n \n draw = ImageDraw.Draw(imgAffinity) \n r1 = radius\n p = point\n draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),(255,255,255))\n\n del draw\n\n # Compute the array to add the afinity\n array = (np.array(imgAffinity)/255)[:,:,0]\n\n angle_vector = np.array(center) - np.array(point)\n angle_vector = normalize(angle_vector)\n affinity = np.concatenate([[array*angle_vector[0]],[array*angle_vector[1]]])\n\n # print (tensor)\n if not img_affinity is None:\n # Find the angle vector\n # print (angle_vector)\n if length(angle_vector) >0:\n angle=py_ang(angle_vector)\n else:\n angle = 0\n # print(angle)\n c = np.array(colorsys.hsv_to_rgb(angle/360,1,1)) * 255\n draw = ImageDraw.Draw(img_affinity) \n draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),fill=(int(c[0]),int(c[1]),int(c[2])))\n del draw\n re = torch.from_numpy(affinity).float() + tensor\n return re, img_affinity",
"def extractOblateEllipse(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for oblate ellipse\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1. - aniso**2. )\n\n\n # the oblate surface area\n surface = 2. * np.pi * kperp**2. * ( 1. + ( (1. - e**2.) / e ) * np.arctanh(e) )\n\n return surface",
"def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r",
"def ellipse(radii = (10,5), angle_resolution = 2.5, layer = 0):\n D = Device(name = 'ellipse')\n a = radii[0]\n b = radii[1]\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n r = a*b / (sqrt((b*cos(t))**2 + (a*sin(t))**2))\n xpts = r*cos(t)\n ypts = r*sin(t)\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D",
"def circle_center(top_aerofoil_points, bottom_aerofoil_points):\n q = np.array(top_aerofoil_points[0].coordinates) - np.array(top_aerofoil_points[1].coordinates)\n r = np.array(bottom_aerofoil_points[-1].coordinates) - np.array(bottom_aerofoil_points[-2].coordinates)\n c = np.cross(q, [0, 0, -1]) / np.linalg.norm(q)\n d = np.cross(r, [0, 0, 1]) / np.linalg.norm(r)\n radius = (q[1] - r[1]) / (d[1] - c[1])\n s = q + radius * c\n return Point(tuple(-s))"
] |
[
"0.7398188",
"0.7084216",
"0.7043236",
"0.688056",
"0.6867334",
"0.6799484",
"0.6770698",
"0.67403173",
"0.6703428",
"0.66681963",
"0.66667646",
"0.6660637",
"0.65539074",
"0.6545677",
"0.6435292",
"0.6419396",
"0.6383157",
"0.6372657",
"0.6366095",
"0.636599",
"0.63603014",
"0.62984353",
"0.62974167",
"0.62293226",
"0.6221004",
"0.62207985",
"0.6199629",
"0.61959946",
"0.61890906",
"0.6186215"
] |
0.836559
|
0
|
A simple 2D drawing interface for PIL images.
|
def Draw(im, mode=None):
# try:
# return im.getdraw(mode)
# except AttributeError:
# return ImageDraw(im, mode)
return ImageDraw(im)
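For orientation, the factory above mirrors Pillow's own ImageDraw.Draw call; a short sketch of the usual calling pattern with stock Pillow (the size, shape and filename are illustrative only):

from PIL import Image, ImageDraw

im = Image.new("RGB", (64, 64), "white")
draw = ImageDraw.Draw(im)                      # same factory pattern as Draw(im) above
draw.ellipse((8, 8, 56, 56), outline="black")  # draws on the image in place
im.save("circle.png")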
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def draw(self, x, y, dx, dy, color):\n\n draw = ImageDraw.Draw(self.image)\n\n draw.rectangle([(x,y),(dx,dy)], color, outline=None)",
"def get_image(self):\n image = Image.new('1', (8, 16))\n draw = ImageDraw.Draw(image)\n for x in xrange(8):\n for y in xrange(16):\n draw.point((x,y),self.get_pixel(x, y))\n return image",
"def draw_img(self, i, j, k):\n if k < len(self.images):\n img = self.images[k]\n r = self.get_rect(i, j)\n self.screen.blit(img, r)",
"def Draw(self):\n\t\tGameImage.Draw(self, self.coords)",
"def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))",
"def draw(self):\n return ImageDraw.Draw(self.buffer)",
"def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())",
"def draw():",
"def drawImage(img, ax):\n if (len(img.shape) != 3 or img.shape[2] != 3\n or np.min(img) < 0 or np.max(img) > 1):\n raise ValueError(\"'img' must be WxHx3, with all entries in [0,1].\")\n ax.imshow(img.transpose((1,0,2)), aspect='equal', interpolation='nearest',\n origin='lower')",
"def Draw(*args, **kwargs):\n return _gdi_.ImageList_Draw(*args, **kwargs)",
"def render_2d_vector(v1, gridsize=50):\n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n fb.render_vector_2d( v1, scale=gridsize)\n fb.save('vec.png')",
"def testImage():\n width = 200\n height = 200\n image = BitMap( width, height )\n \n # create a loop in order to draw some pixels\n \n for col in range(width):\n if col % 10 == 0: print 'col is', col\n for row in range(height):\n if col % 10 == 0 or row % 10 == 0:\n image.plotPoint( col, row ) \n \n # we have now looped through every image pixel\n # next, we write it out to a file\n \n image.saveFile( \"test.bmp\" )\n #changing the col and row number determines how big the grid is for the picture or how zoomed in it is. Changing the and to or just makes the grid go from dotted grid to lines.",
"def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image",
"def draw(self, surface):\r\n surface.blit(self.image, self.rect)",
"def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')",
"def new_image(self, width, height, background=None, mode=\"RGBA\"):\n self.img = PIL.Image.new(mode, (width, height), background)\n self.width,self.height = width,height\n self.drawer = aggdraw.Draw(self.img)",
"def draw_image(self, image, src_coor, src_size, dest_coor, dest_size, angle = 0):\n img = Image_process.update(image, src_coor, src_size, dest_size, angle)\n self.canvas.create_image(dest_coor, image=img)",
"def draw_a50(self):\r\n\t\tpg.draw.rect(self.image, (100, 200, 100), self.rect)\r\n\t\r\n\t\t#self.display_surface.blit(self.image, self.rect)\r",
"def draw(iiter):\n from matplotlib import pyplot as plt\n fig = plt.gcf()\n fig.canvas.draw()",
"def draw(self):\n self.screen.blit(self.image, self.rect)",
"def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))",
"def draw(self):\n self.write_image()\n self.update()",
"def draw(self, surface):\n surface.blit(self.image, self.rect)",
"def draw(self, surface):\n surface.blit(self.image, self.rect)",
"def draw_image(self, path, x=0, y=0, w=128, h=128):\n x2 = x + w - 1\n y2 = y + h - 1\n if self.is_off_grid(x, y, x2, y2):\n return\n with open(path, \"rb\") as f:\n chunk_height = 1024 // w\n chunk_count, remainder = divmod(h, chunk_height)\n chunk_size = chunk_height * w * 2\n chunk_y = y\n if chunk_count:\n for c in range(0, chunk_count):\n buf = f.read(chunk_size)\n self.set_window(x, chunk_y,\n x2, chunk_y + chunk_height - 1,\n buf)\n chunk_y += chunk_height\n if remainder:\n buf = f.read(remainder * w * 2)\n self.set_window(x, chunk_y,\n x2, chunk_y + remainder - 1,\n buf)",
"def __init__(self, width, height, background=None, mode=\"RGBA\", ppi=300):\n # unless specified, interpret width and height as pixels\n width = units.parse_dist(width, default_unit=\"px\", ppi=ppi)\n height = units.parse_dist(height, default_unit=\"px\", ppi=ppi)\n width,height = int(round(width)),int(round(height))\n # create image\n self.img = PIL.Image.new(mode, (width, height), background)\n # create drawer\n self.drawer = aggdraw.Draw(self.img)\n # remember info\n self.background = background\n self.ppi = ppi\n # by default, interpret all sizes in % of width\n self.default_unit = \"%w\"\n # by default, interpret all coordinates in pixel space\n self.pixel_space()",
"def example_BSR():\n pts = [(1,1),(2,2),(3,3)]\n lines = [ [ (1,1), (1,2), (2,1)], [ (6,1), (1,6), (5,-1)] ]\n\n bloody_simple_2drender('2d_render.png', pts=pts, vecs=pts, lines=lines )",
"def draw_point(x, y):\n map_image = Image.open('map.png')\n map_image.putpixel((x, y), (0, 255, 0))\n map_image.save('map.png')\n map_image.show('map.png')",
"def paint(self, draw, x, y, w, h):\n\t\tpass",
"def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)"
] |
[
"0.63596463",
"0.6330536",
"0.6326034",
"0.62757623",
"0.62357527",
"0.61913747",
"0.6174545",
"0.61168766",
"0.6074993",
"0.606635",
"0.59896314",
"0.59639984",
"0.5949333",
"0.5922507",
"0.5905733",
"0.58952296",
"0.5886632",
"0.5886068",
"0.5878178",
"0.5874104",
"0.58549225",
"0.5833198",
"0.583131",
"0.583131",
"0.57975495",
"0.5780308",
"0.57681143",
"0.5756201",
"0.57472926",
"0.5718621"
] |
0.6600076
|
0
|
Gets a mode descriptor for the given mode.
|
def getmode(self, mode):
    global _modes
    modes = {}
    # core modes
    for m, (basemode, basetype, bands) in _MODEINFO.items():
        modes[m] = ModeDescriptor(m, bands, basemode, basetype)
    # extra experimental modes
    modes["RGBa"] = ModeDescriptor("RGBa",
                                   ("R", "G", "B", "a"), "RGB", "L")
    modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L")
    modes["La"] = ModeDescriptor("La", ("L", "a"), "L", "L")
    modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L")
    # mapping modes
    modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L")
    modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L")
    modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L")
    # set the global mode cache atomically
    _modes = modes
    return _modes[mode]
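A standalone sketch of the same build-then-publish cache pattern; ModeDescriptor and _MODEINFO below are simplified stand-ins, not the real tables used above.

from collections import namedtuple

ModeDescriptor = namedtuple("ModeDescriptor", "mode bands basemode basetype")
_MODEINFO = {"L": ("L", "L", ("L",)), "RGB": ("RGB", "L", ("R", "G", "B"))}
_modes = None

def getmode(mode):
    global _modes
    if not _modes:
        built = {m: ModeDescriptor(m, bands, basemode, basetype)
                 for m, (basemode, basetype, bands) in _MODEINFO.items()}
        _modes = built  # build fully, then publish, so lookups never see a partial dict
    return _modes[mode]

print(getmode("RGB").bands)  # ('R', 'G', 'B')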
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def mode(self):\n return self._data.get('mode', None)",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_mode(self):\r\n return self.mode",
"def getmode(self):\n return self.mode",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def mode(self):\n return self._lift(\"mode\")",
"def getMode(self):\n return self._mode",
"def _get_mode(self):\n raise NotImplementedError",
"def mode(self) -> Mode:\n return self._mode",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def mode(self):\n return self._mode",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def mode(self):\n return self.__mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def mode(self):\r\n return self._mode",
"def _get_mode():\n return context.get_context('mode')",
"def getMode(self):\n with self.lock:\n mode = self.mode\n return mode",
"def getMode(self, modeName = None):\n\t\tif modeName not in self.modes:\n\t\t\tif modeName == None:\n\t\t\t\traise Exception(\"Get schema '%s' error\" % self.name)\n\t\t\telse:\n\t\t\t\traise Exception(\"Get schema '%s' with mode name '%s' error\" % (self.name, str(modeName)))\n\t\treturn self.modes.get(modeName)",
"def mode(self):\n return self._mode_func",
"def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...",
"def mode(self):\n\n return self._mode",
"def mode(self) -> str:\r\n return self._mode",
"def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def mode(self):\n if \"mode\" in self.recipe:\n return self.recipe[\"mode\"]\n else:\n raise ValueError(\"No mode defined for recipe {}!\".format(self))",
"def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'FIX': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return"
] |
[
"0.70583034",
"0.70422727",
"0.7036573",
"0.7019406",
"0.7012943",
"0.7008377",
"0.69788706",
"0.6930293",
"0.6914595",
"0.6872009",
"0.6854045",
"0.6854045",
"0.6854045",
"0.68409944",
"0.6836563",
"0.6831456",
"0.6831456",
"0.6831456",
"0.6809317",
"0.68004644",
"0.6780946",
"0.6765356",
"0.6734477",
"0.6724337",
"0.67115784",
"0.6615814",
"0.65992427",
"0.65966094",
"0.65931",
"0.6583147"
] |
0.7578383
|
0
|
Common check to enforce type and sanity check on size tuples
|
def _check_size(size):
if not isinstance(size, (list, tuple)):
raise ValueError("Size must be a tuple")
if len(size) != 2:
raise ValueError("Size must be a tuple of length 2")
if size[0] < 0 or size[1] < 0:
raise ValueError("Width and height must be >= 0")
return True
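A quick illustration of the validator's behaviour, assuming the _check_size above is in scope:

_check_size((640, 480))         # returns True
try:
    _check_size(640)            # not a list/tuple
except ValueError as err:
    print(err)                  # Size must be a tuple
try:
    _check_size((640, 480, 3))  # wrong length
except ValueError as err:
    print(err)                  # Size must be a tuple of length 2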
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])",
"def __size_restriction_incorrect_tuple_number(self):\n\n strTestName = 'Tuple size lower than a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Tuple parameter')\n RxCSObject.paramType('parameter1', tuple)\n RxCSObject.paramSizL('parameter1', 3, mul=2)\n\n RxCSObject.parameter1 = (1, 2, 3, 4, 5, 6)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def check_resize_size(size):\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for i, value in enumerate(size):\n check_value(value, (1, INT32_MAX), \"size at dim {0}\".format(i))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")",
"def __size_restriction_incorrect_tuple_parameter(self):\n\n strTestName = 'Tuple size lower or equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a tuple\n RxCSObject.paramAddMan('parameter1', 'Tuple parameter')\n RxCSObject.paramType('parameter1', tuple)\n RxCSObject.paramSizLE('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 2\n RxCSObject.parameter1 = (1, 2, 3, 4, 5, 6)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def __size_restriction_correct_tuple_number(self):\n\n strTestName = 'Tuple size equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Tuple parameter')\n RxCSObject.paramType('parameter1', tuple)\n RxCSObject.paramSizEq('parameter1', 3, mul=2)\n\n RxCSObject.parameter1 = (1, 2, 3, 4, 5, 6)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'",
"def test_nested_one_arg_short():\n\n @type_checked\n def _run_test(thing:(float, int, str)): pass\n\n with pytest.raises(TypeError) as error:\n _run_test((\"123\", 123.12))\n\n assert error.exconly() == (\n \"TypeError: Argument length mismatch. \"\n \"Expected a tuple of float, int, str.\"\n )",
"def test_tuples():\n\n @type_checked\n def _run_test(something:(str, int, bool)):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=(None, \"12\", 1))",
"def check_crop_size(size):\n type_check(size, (int, list, tuple), \"size\")\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for value in size:\n check_value(value, (1, FLOAT_MAX_INTEGER))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")",
"def __size_restriction_inccorrect_string_tuple(self):\n\n strTestName = 'String size higher than the size of a tuple (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tRefParameter1', 'Tuple ref. parameter')\n RxCSObject.paramType('tRefParameter1', tuple)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizH('parameter1', 'tRefParameter1')\n\n RxCSObject.tRefParameter1 = (4, 5, 8, 9)\n RxCSObject.parameter1 = 'abbc'\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def checkTrainData(cls, data):\n\n if data == None or len(data) == 0:\n raise Exception(\"No data\")\n\n if type(data[0]) != tuple:\n raise Exception(\"Not a list of tuples\")\n\n if len(data[0]) != 2 and type(data[0][0]) != str and type(data[0][1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n length = len(data[0][1])\n\n for tup in data:\n if len(tup) != 2 and type(tup[0]) != str and type(tup[1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n if len(tup[1]) != length:\n raise Exception(\"Not all elements have the same amount of data\")",
"def test_badsizevaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square((1, 2), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def __size_restriction_correct_string_tuple(self):\n\n strTestName = 'String size lower than the size of a tuple (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tRefParameter1', 'Tuple ref. parameter')\n RxCSObject.paramType('tRefParameter1', tuple)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizL('parameter1', 'tRefParameter1')\n\n RxCSObject.tRefParameter1 = (4, 5, 8, 9)\n RxCSObject.parameter1 = 'abb'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")",
"def __size_restriction_correct_tuple_parameter(self):\n\n strTestName = 'Tuple size higher than a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a tuple\n RxCSObject.paramAddMan('parameter1', 'Tuple parameter')\n RxCSObject.paramType('parameter1', tuple)\n RxCSObject.paramSizH('parameter1', 'iRefParameter1', mul=2, add=1)\n\n RxCSObject.iRefParameter1 = 2\n RxCSObject.parameter1 = (1, 2, 3, 4, 5, 6)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def __DimSiz_restriction_correct_ndarray_tuple_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower than the size of a tuple [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tParameter1', 'Tuple parameter')\n RxCSObject.paramType('tParameter1', tuple)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimL('parameter1', 'tParameter1', 'rows', 0, pedantic=1)\n\n RxCSObject.tParameter1 = (3, 4, 5, 6, 7)\n RxCSObject.parameter1 = np.random.randn(4, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def __DimSiz_restriction_incorrect_ndarray_tuple_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array equals the size of a tuple [pedantic] (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tParameter1', 'Tuple parameter')\n RxCSObject.paramType('tParameter1', tuple)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimEq('parameter1', 'tParameter1', 'columns', 1, pedantic=1)\n\n RxCSObject.tParameter1 = (3, 4, 5, 6, 7)\n RxCSObject.parameter1 = np.random.randn(4, 2)\n\n self.__parametersCheck_error(RxCSObject, ValueError, strTestName)",
"def test__tuple_raise_dimension_error(N):\n dummy_kernel_size = None\n\n with pytest.raises(ValueError):\n utils._tuple(dummy_kernel_size, N)",
"def test_check_type_1():\r\n hl = hotlist.HotList()\r\n hl._validate_value(1)\r\n hl._validate_value(1L)\r\n hl._validate_value(1.5)\r\n hl._validate_value(\"abc\")\r\n hl._validate_value(u\"abc\")\r\n hl._validate_value((1, 2, 3,))\r\n hl._validate_value((1, \"AAA\", 3,))\r\n hl._validate_value((1, (\"AAA\", 2, 3,) , 3,))\r\n hl._validate_value((1, frozenset([\"AAA\", 2, 3,]) , 3,))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value([ 1, 2, 3,])\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(( 1, 2, [ 3, 4, 5,],))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value({})\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(hotlist.HotList())",
"def _check_shape(shape):\n if type(shape) == int:\n shape = (shape, shape)\n check_odd(shape, 'psf shape')\n return shape",
"def _check_sql_args(self, sql_args):\n # Check that sql arguments have the correct type\n if sql_args and type(sql_args) not in [tuple, list]:\n raise TypeError(\"sql_args should be tuple or list. Found %s \" %\n type(sql_args))",
"def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")",
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def _check_input_size(n_components, n_features):\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)",
"def __size_restriction_incorrect_vector_tuple(self):\n\n strTestName = 'Vector size equal to the size of a tuple (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tRefParameter1', 'Tuple ref. parameter')\n RxCSObject.paramType('tRefParameter1', tuple)\n\n # Now, let me define a Numpy vector\n RxCSObject.paramAddMan('parameter1', 'Numpy array 1D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizEq('parameter1', 'tRefParameter1', mul=2)\n\n RxCSObject.tRefParameter1 = (0, 1, 0, 4)\n RxCSObject.parameter1 = np.random.randn(9)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def is_tuple_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=tuple)",
"def validate_tuple(validator, data):\n if type(data) is not tuple:\n return False\n if len(validator) != len(data):\n return False\n # all elements must be valid\n return all(imap(validate_common, validator, data))",
"def test_16_tuple_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, ())\n self.assertEqual(\n \"height must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle((1, 2, 3), 2)\n self.assertEqual(\n \"width must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, (2, 4))\n self.assertEqual(\n \"x must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, 0, (\"hi\",))\n self.assertEqual(\n \"y must be an integer\",\n str(x.exception))",
"def test_nested_fail():\n\n @type_checked\n def _run_test(thing:(float, float)): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(12)\n\n assert error.exconly() == (\n \"TypeError: Argument length mismatch. Expected a tuple of float, float.\"\n )",
"def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions"
] |
[
"0.7021964",
"0.68438643",
"0.68385047",
"0.6774464",
"0.676999",
"0.6747268",
"0.67050594",
"0.66413397",
"0.6622299",
"0.65459746",
"0.6538224",
"0.64933",
"0.649159",
"0.64404726",
"0.64403695",
"0.64007986",
"0.6389875",
"0.63648",
"0.6346499",
"0.63307214",
"0.6302581",
"0.6301969",
"0.62699884",
"0.6242897",
"0.622503",
"0.6224092",
"0.62225914",
"0.62223625",
"0.6221717",
"0.6207256"
] |
0.7576245
|
0
|
Creates a new image with the given mode and size.
|
def new(mode, size, color=0):
    _check_size(size)
    if color is None:
        # don't initialize
        _im = Image()._new(mode, size)
        return Image(_im)
    if isinstance(color, str):
        # css3-style specifier
        color = ImageColor().getcolor(color, mode)
        color = ImageDraw(None)._convert_bgr2rgb(color)
    _im = Image()._new(mode, size, color)
    return Image(_im)
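The snippet above appears to reimplement Pillow's Image.new on top of OpenCV-style arrays; the stock Pillow call below shows the same caller-facing behaviour (sizes and colours are illustrative only):

from PIL import Image

blank = Image.new("RGB", (320, 240))              # initialised to black (color=0)
red   = Image.new("RGB", (320, 240), "red")       # css3-style colour specifier
raw   = Image.new("RGB", (320, 240), color=None)  # memory left uninitialised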
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_image(storage, filename, size=(100, 100), image_mode='RGB', image_format='PNG'):\n data = BytesIO()\n PIL.Image.new(image_mode, size).save(data, image_format)\n data.seek(0)\n if not storage:\n return data\n image_file = ContentFile(data.read())\n return storage.save(filename, image_file)",
"def make_image(storage, name, width, height, format='JPEG', mode='RGB'):\n im = Image.new(mode, (width, height))\n draw = ImageDraw.Draw(im)\n draw.rectangle([0, 0, width // 2, height // 2], '#F00')\n draw.rectangle([width // 2, 0, width, height // 2], '#0F0')\n draw.rectangle([0, height // 2, width // 2, height], '#00F')\n draw.rectangle([width // 2, height // 2, width, height], '#000')\n draw.rectangle([width // 4, height // 4, 3 * width // 4, 3 * height // 4], '#FFF')\n im_bytes_io = io.BytesIO()\n im.save(im_bytes_io, format)\n im_bytes_io.seek(0)\n storage.save(name, im_bytes_io)",
"def get_image(self, size, mode='normal', state='on'):\n raise NotImplementedError",
"def new_image(self, width, height, background=None, mode=\"RGBA\"):\n self.img = PIL.Image.new(mode, (width, height), background)\n self.width,self.height = width,height\n self.drawer = aggdraw.Draw(self.img)",
"def new(self, size, fill):\n return Image(PIL.Image.new(\"RGB\", size, fill))",
"def make_image(self, mode=\"L\") -> Image:\r\n return Image.fromarray(self.fb, mode=\"L\")",
"def create_image(config, size_mb):\n\n delete_image(config)\n iotests.log(\"truncate %s --size %dMB\" % (config.image_path(), size_mb),\n filters=[iotests.filter_test_dir])\n with open(config.image_path(), \"w\") as fn:\n fn.truncate(size_mb * 1024 * 1024)",
"def model_to_image(model, size, mode='center', factor=1, center=None):\n\n x_size, y_size = _validate_image_size(size)\n\n if center is None:\n x_origin, y_origin = (0, 0)\n else:\n x_origin, y_origin = model_center_to_image_origin(center, size)\n\n return discretize_model(\n model=model,\n x_range=[x_origin, x_origin + x_size],\n y_range=[y_origin, y_origin + y_size],\n mode=mode,\n factor=factor)",
"def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out",
"def qemu_img_create(config, size_mb):\n\n opts = [\n \"key-secret=sec0\",\n \"iter-time=10\",\n \"cipher-alg=%s-%d\" % (config.cipher, config.keylen),\n \"cipher-mode=%s\" % config.mode,\n \"ivgen-alg=%s\" % config.ivgen,\n \"hash-alg=%s\" % config.hash,\n ]\n if config.ivgen_hash is not None:\n opts.append(\"ivgen-hash-alg=%s\" % config.ivgen_hash)\n\n args = [\"create\", \"-f\", \"luks\",\n \"--object\",\n (\"secret,id=sec0,data=%s,format=base64\" %\n config.first_password_base64()),\n \"-o\", \",\".join(opts),\n config.image_path(),\n \"%dM\" % size_mb]\n\n iotests.log(\"qemu-img \" + \" \".join(args), filters=[iotests.filter_test_dir])\n iotests.log(iotests.qemu_img_pipe(*args), filters=[iotests.filter_test_dir])",
"def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)",
"def frombytes(mode, size, data, decoder_name=\"raw\", *args):\r\n\r\n _check_size(size)\r\n \r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\" and args == ():\r\n args = mode\r\n\r\n im = new(mode, size)\r\n im.frombytes(mode, size, data, decoder_name, args)\r\n return im",
"def create_image(self):\n # how many categories?\n aspect_ratio = float(4) / 3\n self.width = int(math.sqrt(aspect_ratio * self.total))\n self.height = int(self.width / aspect_ratio)\n\n img = Image.new(\"RGB\", (self.width, self.height))\n return img",
"def newimagefromshape(self, *args, **kwargs):\n return _image.image_newimagefromshape(self, *args, **kwargs)",
"def make_thumbnail(image, size=(100, 100)):\n logging.debug(image)\n\n im = create_colorblind_image(image)\n\n thumb_io = BytesIO() # create a BytesIO object\n\n im.save(thumb_io, 'PNG', quality=85) # save image to BytesIO object\n\n thumbnail = File(thumb_io, name=image.name) # create a django friendly File object\n\n return thumbnail",
"def create_image(self):\n\n self._image = 255 * np.ones((self._height, self._width, 3), np.uint8)",
"def resize(self, size):\n return Image(self.pil_image.resize(size, PIL.Image.ANTIALIAS))",
"def get_image(self, size, mode='normal', state='on'):\n qt_size = QSize(*size)\n qt_mode = MODE_MAP.get(mode, QIcon.Normal)\n qt_state = STATE_MAP.get(state, QIcon.On)\n qpixmap = self._qicon.pixmap(qt_size, qt_mode, qt_state)\n qimage = qpixmap.toImage()\n image = QtImage(qimage)\n return image",
"def generate_test_image(name, size=(36, 36)):\n return ContentFile(\n factory.django.ImageField()._make_data(\n {'width': size[0], 'height': size[1]}\n ), '{}.jpg'.format(name))",
"def __init__(self, image_size, #is_color, mean, scale,\n crop_size=0, pad=28, color='RGB',#'BGR',\n use_cutout=False,\n use_mirroring=False,\n use_random_crop=False,\n use_center_crop=False,\n use_random_gray=False):\n self.image_size = image_size\n pass",
"def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)",
"def generate_thumb(origin, size, fn):\n assert isinstance(size, int), 'Integers are expected'\n img = Image.open(origin)\n path = os.path.dirname(origin)\n\n new_img = img.resize((size, size), Image.ANTIALIAS)\n thumb_path = os.path.join(path, fn)\n new_img.save(thumb_path)\n return thumb_path",
"def get_image(image_path, width, height, mode, box = None):\n image = Image.open(image_path)\n image = image.resize([width, height], Image.BILINEAR)\n return np.array(image.convert(mode))",
"def ensure_size(self, device, size):\n if size != self.size:\n self.size = size\n usage = wgpu.TextureUsage.RENDER_ATTACHMENT | wgpu.TextureUsage.COPY_SRC\n if self.format.startswith((\"rgb\", \"bgr\")):\n usage |= wgpu.TextureUsage.TEXTURE_BINDING\n self.texture = device.create_texture(\n size=size, usage=usage, dimension=\"2d\", format=self.format\n )\n self.texture_view = self.texture.create_view()",
"def create_training_images(fn, i, p_hr, p_lr, size, qualityFactor, downsize=True):\n dest = p_lr/fn.relative_to(p_hr)\n dest.parent.mkdir(parents=True, exist_ok=True)\n img = PIL.Image.open(fn)\n if downsize:\n targ_sz = resize_to(img, size, use_min=True) # W x H\n img = img.resize(targ_sz, resample=PIL.Image.BILINEAR).convert('RGB')\n img.save(dest.with_suffix(\".jpg\"), \"JPEG\", quality=qualityFactor)",
"def make_digit(self, mode='fixed'):\n if self.l_i != 14:\n raise ValueError('To create a digit, l_i = 14')\n\n try:\n data = loadmat('data/mnist_small.mat')\n images = data['IMAGES']\n labels = data['LABELS'][0]\n\n K, self.l_i, _ = images.shape\n\n if mode == 'fixed':\n # Chose a particular image that will work well\n k = 37 # zero\n k = 35 # two\n elif mode == 'random':\n k = np.random.randint(K)\n else:\n raise ValueError('unrecognized mode')\n\n self.reset_img()\n self.k = k\n self.img[:, :] = images[k]\n self.img_name = str(labels[k])\n\n except IOError, e:\n print e\n raise IOError(e)",
"def make_image_file(\n file_format: str,\n color_space: str,\n width: int,\n height: int,\n) -> io.BytesIO:\n image_buffer = io.BytesIO()\n image = Image.new(color_space, (width, height))\n # If this assertion ever fails, see\n # https://github.com/VWS-Python/vws-test-fixtures for what to do.\n assert color_space != 'L'\n reds = random.choices(population=range(0, 255), k=width * height)\n greens = random.choices(population=range(0, 255), k=width * height)\n blues = random.choices(population=range(0, 255), k=width * height)\n pixels = list(zip(reds, greens, blues))\n image.putdata(pixels)\n image.save(image_buffer, file_format)\n image_buffer.seek(0)\n return image_buffer",
"def frombuffer(mode, size, data, decoder_name=\"raw\", *args):\r\n\r\n _check_size(size)\r\n\r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\":\r\n if args == ():\r\n args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6\r\n if args[0] in _MAPMODES:\r\n channels, depth = Image()._get_channels_and_depth(mode)\r\n im = np.frombuffer(data)\r\n im = im.reshape((size[1], size[0], channels))\r\n im = im.astype(depth)\r\n im_ = new(mode, (1, 1))\r\n im_._instance = im\r\n im_.readonly = 1\r\n return im_\r\n\r\n return frombytes(mode, size, data, decoder_name, args)",
"def create_thumb(source_fame, target_fame, target_w = 260, target_h=205):\r\n size = target_w, target_h\r\n im = Image.open(source_fame)\r\n width = im.size[0]\r\n height = im.size[1]\r\n newwidth = int(size[0])\r\n newheight = int(height*(newwidth/float(width)))\r\n if newheight > int(size[1]):\r\n newheight = int(size[1])\r\n newwidth = int(width*(newheight/float(height)))\r\n size = newwidth, newheight\r\n # Resize and save the image\r\n im.thumbnail(size, Image.ANTIALIAS)\r\n im.save(target_fame)",
"def create_image(self):\n img = cv2.imread(self.url)\n self.img = cv2.resize(img, (self.window_x, self.window_y))"
] |
[
"0.7050601",
"0.7033265",
"0.6980973",
"0.6725615",
"0.64338475",
"0.63699013",
"0.62156343",
"0.5979368",
"0.5970218",
"0.5810323",
"0.5792077",
"0.57874864",
"0.5786103",
"0.57712936",
"0.5710684",
"0.56795233",
"0.5660675",
"0.56303596",
"0.56022155",
"0.55955166",
"0.5585897",
"0.55749696",
"0.55578536",
"0.5552264",
"0.5544209",
"0.5534867",
"0.5517109",
"0.54745615",
"0.54541576",
"0.5443453"
] |
0.76179135
|
0
|
Creates an image memory from an object exporting the array interface (using the buffer protocol). If obj is not contiguous, then the tobytes method is called
|
def fromarray(obj, mode=None):
    if isinstance(obj, np.ndarray):
        # detect the mode from the array shape/dtype, then reorder channels
        # for the BGR-ordered internal representation
        _mode = Image()._get_mode(obj.shape, obj.dtype)
        if _mode == 'RGB':
            obj = cv2.cvtColor(obj, cv2.COLOR_RGB2BGR)
        elif _mode == "RGBA":
            obj = cv2.cvtColor(obj, cv2.COLOR_RGBA2BGRA)
        return Image(obj)
    else:
        raise TypeError("Cannot handle this data type")
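A minimal round-trip sketch, assuming the fromarray above is in scope together with the Image wrapper it returns (which stores pixels BGR-ordered, hence the cvtColor calls):

import numpy as np

rgb = np.zeros((32, 32, 3), dtype=np.uint8)
rgb[..., 0] = 255                 # pure red in RGB channel order
img = fromarray(rgb)              # channels reordered to BGR before wrapping
try:
    fromarray("not an array")
except TypeError as err:
    print(err)                    # Cannot handle this data type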
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _tobuffer(self, object_):\n\n raise NotImplementedError",
"def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)",
"def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)",
"def asarray(obj, itemsize=None, unicode=None, order=None):\n return array(obj, itemsize, copy=False,\n unicode=unicode, order=order)",
"def default(self, obj):\n if isinstance(obj, np.ndarray):\n if obj.flags['C_CONTIGUOUS']:\n obj_data = obj.data\n else:\n cont_obj = np.ascontiguousarray(obj)\n assert (cont_obj.flags['C_CONTIGUOUS'])\n obj_data = cont_obj.data\n data_b64 = base64.b64encode(obj_data).decode(\"ascii\")\n return dict(__ndarray__=data_b64,\n dtype=str(obj.dtype),\n shape=obj.shape)\n # Let the base class default method raise the TypeError\n return super().default(obj)",
"def toarray(self, object_):\n\n raise NotImplementedError",
"def ShmemRawArray(type_, size_or_initializer, tag, create=True):\n if tag[0] != \"/\":\n tag = \"/%s\" % (tag,)\n\n if isinstance(size_or_initializer, int):\n type_ = type_ * size_or_initializer\n else:\n type_ = type_ * len(size_or_initializer)\n\n buffer = ShmemBufferWrapper(tag, ctypes.sizeof(type_), create=create)\n obj = type_.from_address(buffer.get_address())\n obj._buffer = buffer\n\n if not isinstance(size_or_initializer, int):\n obj.__init__(*size_or_initializer)\n\n return obj",
"def save(self, obj):\r\n if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:\r\n # Compute a hash of the object:\r\n try:\r\n self._hash.update(self._getbuffer(obj))\r\n except (TypeError, BufferError, ValueError):\r\n # Cater for non-single-segment arrays: this creates a\r\n # copy, and thus aleviates this issue.\r\n # XXX: There might be a more efficient way of doing this\r\n # Python 3.2's memoryview raise a ValueError instead of a\r\n # TypeError or a BufferError\r\n self._hash.update(self._getbuffer(obj.flatten()))\r\n\r\n # We store the class, to be able to distinguish between\r\n # Objects with the same binary content, but different\r\n # classes.\r\n if self.coerce_mmap and isinstance(obj, self.np.memmap):\r\n # We don't make the difference between memmap and\r\n # normal ndarrays, to be able to reload previously\r\n # computed results with memmap.\r\n klass = self.np.ndarray\r\n else:\r\n klass = obj.__class__\r\n # We also return the dtype and the shape, to distinguish\r\n # different views on the same data with different dtypes.\r\n\r\n # The object will be pickled by the pickler hashed at the end.\r\n obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))\r\n Hasher.save(self, obj)",
"def obj2tensor(pyobj, device='cuda'):\n storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))\n return torch.ByteTensor(storage).to(device=device)",
"def as_buffer(\n cls,\n obj: torch.Tensor,\n counts: Tuple[int] = None,\n displs: Tuple[int] = None,\n is_contiguous: Optional[bool] = None,\n ) -> List[Union[MPI.memory, Tuple[int, int], MPI.Datatype]]:\n squ = False\n if not obj.is_contiguous() and obj.ndim == 1:\n # this makes the math work below this function.\n obj.unsqueeze_(-1)\n squ = True\n\n mpi_type, elements = cls.mpi_type_and_elements_of(obj, counts, displs, is_contiguous)\n mpi_mem = cls.as_mpi_memory(obj)\n if squ:\n # the squeeze happens in the mpi_type_and_elements_of function in the case of a\n # non-contiguous 1D tensor. Squeezing it puts the memory back to where it should be\n obj.squeeze_(-1)\n return [mpi_mem, elements, mpi_type]",
"def add_object(self, obj): # DEFINE OBJ!\n obj.spritesheet_width = self.spritesheet.size['width']\n obj.spritesheet_height = self.spritesheet.size['height']\n \n obj._layer_added(self)\n \n\n obj.buffer_index = len(self.objects)\n self.objects.append(obj)\n\n x = obj.x\n y = obj.y\n \n self.verts.extend(((x, y, 0.0), (x+obj.width, y, 0.0), (x+obj.width, y-obj.height, 0.0), (x, y-obj.height, 0.0)))\n self.texcoords.extend(obj.uv_texture)\n self.norms.extend(((0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1)))\n\n if pi3d.PLATFORM == pi3d.PLATFORM_PI:\n self.inds.append((self.a,self.b,self.c))\n self.inds.append((self.d,self.a,self.c))\n else:\n self.inds.extend((self.a,self.b,self.c))\n self.inds.extend((self.d,self.a,self.c))\n\n self.a += 4\n self.b += 4\n self.c += 4\n self.d += 4\n\n \n #~ return len(self.sprites)-1",
"def as_mpi_memory(cls, obj) -> MPI.memory:\n return MPI.memory.fromaddress(obj.data_ptr(), 0)",
"def default(self, obj):\n if isinstance(obj, np.ndarray):\n if obj.flags['C_CONTIGUOUS']:\n obj_data = obj.data\n else:\n cont_obj = np.ascontiguousarray(obj)\n assert(cont_obj.flags['C_CONTIGUOUS'])\n obj_data = cont_obj.data\n data_b64 = base64.b64encode(obj_data)\n return dict(__ndarray__=data_b64,\n dtype=str(obj.dtype),\n shape=obj.shape)\n # Let the base class default method raise the TypeError\n return json.JSONEncoder(self, obj)",
"def default(self, obj):\n if isinstance(obj, np.ndarray):\n if obj.flags['C_CONTIGUOUS']:\n obj_data = obj.data\n else:\n cont_obj = np.ascontiguousarray(obj)\n assert(cont_obj.flags['C_CONTIGUOUS'])\n obj_data = cont_obj.data\n data_b64 = base64.b64encode(obj_data)\n return dict(__ndarray__=data_b64,\n dtype=str(obj.dtype),\n shape=obj.shape)\n # Let the base class default method raise the TypeError\n return json.JSONEncoder(self, obj)",
"def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _v7 = val1.header\n buff.write(_struct_I.pack(_v7.seq))\n _v8 = _v7.stamp\n _x = _v8\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v7.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.confidence))\n _v9 = val1.roi\n _x = _v9\n buff.write(_struct_4IB.pack(_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))",
"def save(self, obj):\n if isinstance(obj, self.np.ndarray):\n # Compute a hash of the object:\n try:\n self._hash.update(self.np.getbuffer(obj))\n except TypeError:\n # Cater for non-single-segment arrays: this creates a\n # copy, and thus aleviates this issue.\n # XXX: There might be a more efficient way of doing this\n self._hash.update(self.np.getbuffer(obj.flatten()))\n\n # We store the class, to be able to distinguish between\n # Objects with the same binary content, but different\n # classes.\n if self.coerce_mmap and isinstance(obj, self.np.memmap):\n # We don't make the difference between memmap and\n # normal ndarrays, to be able to reload previously\n # computed results with memmap.\n klass = self.np.ndarray\n else:\n klass = obj.__class__\n # We also return the dtype and the shape, to distinguish\n # different views on the same data with different dtypes.\n\n # The object will be pickled by the pickler hashed at the end.\n obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))\n Hasher.save(self, obj)",
"def thrift_obj_in_bytes(thrift_obj): # pragma: no cover\n trans = TMemoryBuffer()\n thrift_obj.write(TBinaryProtocol(trans))\n\n return bytes(trans.getvalue())",
"def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_2H().pack(_x.image_width, _x.image_height))\n length = len(self.Hlines)\n buff.write(_struct_I.pack(length))\n for val1 in self.Hlines:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.Vlines)\n buff.write(_struct_I.pack(length))\n for val1 in self.Vlines:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n buff.write(_get_struct_H().pack(self.PFPS))\n length = len(self.regions)\n buff.write(_struct_I.pack(length))\n for val1 in self.regions:\n _v5 = val1.color\n _x = _v5\n buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a))\n _v6 = val1.moment\n _x = _v6\n buff.write(_get_struct_10f().pack(_x.m00, _x.m10, _x.m01, _x.m11, _x.m20, _x.m02, _x.m21, _x.m12, _x.m30, _x.m03))\n _x = self\n buff.write(_get_struct_2H().pack(_x.box_width, _x.box_height))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))",
"def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)",
"def object_to_bytes(obj):\n if isinstance(obj, str):\n return bytearray(obj, \"UTF-8\")\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack(\"<L\", obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n #print type(obj), obj\n return obj.get_raw()",
"def default(self, obj):\n if isinstance(obj, np.ndarray):\n if obj.flags['C_CONTIGUOUS']:\n obj_data = obj.data\n else:\n cont_obj = np.ascontiguousarray(obj)\n assert(cont_obj.flags['C_CONTIGUOUS'])\n obj_data = cont_obj.data\n data_b64 = base64.b64encode(obj_data)\n return dict(__ndarray__=data_b64,\n dtype=str(obj.dtype),\n shape=obj.shape)\n elif isinstance(obj, np.generic):\n return np.asscalar(obj)\n # Let the base class default method raise the TypeError\n return json.JSONEncoder(self, obj)",
"def read_image_as_numpy(bytes_obj: Optional[bytes]=None) ->Optional[torch.Tensor]:\n try:\n with BytesIO(bytes_obj) as buffer:\n image = np.load(buffer)\n return torch.from_numpy(image)\n except Exception as e:\n warnings.warn(f'Failed to read image from numpy file. Original exception: {e}')\n return None",
"def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)",
"def as_bytes(array_or_image,mimetype='image/png'):\n buf = StringIO()\n fmt = mimetype2format(mimetype)\n im = as_pil(array_or_image).save(buf,fmt)\n return buf.getvalue()",
"def save(self, obj):\r\n if self.np is not None and type(obj) in (self.np.ndarray,\r\n self.np.matrix, self.np.memmap):\r\n size = obj.size * obj.itemsize\r\n if self.compress and size < self.cache_size * _MEGA:\r\n # When compressing, as we are not writing directly to the\r\n # disk, it is more efficient to use standard pickling\r\n if type(obj) is self.np.memmap:\r\n # Pickling doesn't work with memmaped arrays\r\n obj = self.np.asarray(obj)\r\n return Pickler.save(self, obj)\r\n self._npy_counter += 1\r\n try:\r\n filename = '%s_%02i.npy' % (self._filename,\r\n self._npy_counter)\r\n # This converts the array in a container\r\n obj, filename = self._write_array(obj, filename)\r\n self._filenames.append(filename)\r\n except:\r\n self._npy_counter -= 1\r\n # XXX: We should have a logging mechanism\r\n print('Failed to save %s to .npy file:\\n%s' % (\r\n type(obj),\r\n traceback.format_exc()))\r\n return Pickler.save(self, obj)",
"def _packb2(obj, **options):\n fp = io.BytesIO()\n _pack2(obj, fp, **options)\n return fp.getvalue()",
"def fromarray(self, *args, **kwargs):\n return _image.image_fromarray(self, *args, **kwargs)",
"def image2blob(image):\n # print image.shape,image.dtype\n if image.dtype!=numpy.dtype('B'):\n image = image*255.0+0.5\n image = numpy.array(image,'B')\n assert image.dtype==numpy.dtype('B'),image.dtype\n d0,d1 = image.shape\n assert d0>=0 and d0<256\n assert d1>=0 and d1<256\n s = numpy.zeros(d0*d1+2,'B')\n s[0] = d0\n s[1] = d1\n s[2:] = image.flat\n return buffer(s)",
"def read_buffer1d(fobj, dtype, endian=''):\n\n (npix,) = struct.unpack(endian + 'i', fobj.read(4))\n if dtype == 'float':\n arr = npy.fromfile(file=fobj, dtype=npy.float32, count=npix)\n elif dtype == 'double':\n arr = npy.fromfile(file=fobj, dtype=npy.float64, count=npix)\n else:\n raise CppError('read_buffer1d: do not recogniise dtype = ' + str(dtype))\n return arr",
"def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.graspable_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.graspable_objects:\n _x = val1.reference_frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(val1.potential_models)\n buff.write(_struct_I.pack(length))\n for val2 in val1.potential_models:\n _x = val2.model_id\n buff.write(_get_struct_i().pack(_x))\n _v63 = val2.type\n _x = _v63.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v63.db\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v64 = val2.pose\n _v65 = _v64.header\n _x = _v65.seq\n buff.write(_get_struct_I().pack(_x))\n _v66 = _v65.stamp\n _x = _v66\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v65.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v67 = _v64.pose\n _v68 = _v67.position\n _x = _v68\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _v69 = _v67.orientation\n _x = _v69\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n _x = val2.confidence\n buff.write(_get_struct_f().pack(_x))\n _x = val2.detector_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v70 = val1.cluster\n _v71 = _v70.header\n _x = _v71.seq\n buff.write(_get_struct_I().pack(_x))\n _v72 = _v71.stamp\n _x = _v72\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v71.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(_v70.points)\n buff.write(_struct_I.pack(length))\n for val3 in _v70.points:\n _x = val3\n buff.write(_get_struct_3f().pack(_x.x, _x.y, _x.z))\n length = len(_v70.channels)\n buff.write(_struct_I.pack(length))\n for val3 in _v70.channels:\n _x = val3.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(val3.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val3.values.tostring())\n _v73 = val1.region\n _v74 = _v73.cloud\n _v75 = _v74.header\n _x = _v75.seq\n buff.write(_get_struct_I().pack(_x))\n _v76 = _v75.stamp\n _x = _v76\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v75.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v74\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n length = len(_v74.fields)\n buff.write(_struct_I.pack(length))\n for val4 in _v74.fields:\n _x = val4.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = val4\n buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))\n _x = _v74\n buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, 
_x.row_step))\n _x = _v74.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v74.is_dense\n buff.write(_get_struct_B().pack(_x))\n length = len(_v73.mask)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(_v73.mask.tostring())\n _v77 = _v73.image\n _v78 = _v77.header\n _x = _v78.seq\n buff.write(_get_struct_I().pack(_x))\n _v79 = _v78.stamp\n _x = _v79\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v78.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v77\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n _x = _v77.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v77\n buff.write(_get_struct_BI().pack(_x.is_bigendian, _x.step))\n _x = _v77.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v80 = _v73.disparity_image\n _v81 = _v80.header\n _x = _v81.seq\n buff.write(_get_struct_I().pack(_x))\n _v82 = _v81.stamp\n _x = _v82\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v81.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v80\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n _x = _v80.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v80\n buff.write(_get_struct_BI().pack(_x.is_bigendian, _x.step))\n _x = _v80.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v83 = _v73.cam_info\n _v84 = _v83.header\n _x = _v84.seq\n buff.write(_get_struct_I().pack(_x))\n _v85 = _v84.stamp\n _x = _v85\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v84.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v83\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n _x = _v83.distortion_model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(_v83.D)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(_v83.D.tostring())\n buff.write(_v83.K.tostring())\n buff.write(_v83.R.tostring())\n buff.write(_v83.P.tostring())\n _x = _v83\n buff.write(_get_struct_2I().pack(_x.binning_x, _x.binning_y))\n _v86 = _v83.roi\n _x = _v86\n buff.write(_get_struct_4IB().pack(_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify))\n _v87 = _v73.roi_box_pose\n _v88 = _v87.header\n _x = _v88.seq\n 
buff.write(_get_struct_I().pack(_x))\n _v89 = _v88.stamp\n _x = _v89\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v88.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v90 = _v87.pose\n _v91 = _v90.position\n _x = _v91\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _v92 = _v90.orientation\n _x = _v92\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n _v93 = _v73.roi_box_dims\n _x = _v93\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _x = val1.collision_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs))\n _x = self.image.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2I().pack(_x.image.height, _x.image.width))\n _x = self.image.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_BI().pack(_x.image.is_bigendian, _x.image.step))\n _x = self.image.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs))\n _x = self.camera_info.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2I().pack(_x.camera_info.height, _x.camera_info.width))\n _x = self.camera_info.distortion_model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.camera_info.D)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.camera_info.D.tostring())\n buff.write(self.camera_info.K.tostring())\n buff.write(self.camera_info.R.tostring())\n buff.write(self.camera_info.P.tostring())\n _x = self\n buff.write(_get_struct_6IB().pack(_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify))\n length = len(self.meshes)\n buff.write(_struct_I.pack(length))\n for val1 in self.meshes:\n length = len(val1.triangles)\n buff.write(_struct_I.pack(length))\n for val2 in val1.triangles:\n buff.write(val2.vertex_indices.tostring())\n length = len(val1.vertices)\n buff.write(_struct_I.pack(length))\n for val2 in val1.vertices:\n _x = val2\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _x = self\n buff.write(_get_struct_7d().pack(_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, 
_x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))"
] |
[
"0.6534533",
"0.6064485",
"0.6064485",
"0.6059072",
"0.59939635",
"0.59344447",
"0.5844922",
"0.57877684",
"0.57358426",
"0.56880414",
"0.56742525",
"0.56553286",
"0.5627507",
"0.5627507",
"0.5589481",
"0.55763996",
"0.55743444",
"0.55666006",
"0.5551432",
"0.5528058",
"0.5524803",
"0.55073905",
"0.54726934",
"0.54585916",
"0.54247415",
"0.54172134",
"0.5383453",
"0.5381461",
"0.5330139",
"0.5325309"
] |
0.6243654
|
1
|
Alpha composite im2 over im1.
|
def alpha_composite(im1, im2):
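    # Split each image into R, G, B and alpha channels; only the alphas (a1, a2) are used below.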
r1, g1, b1, a1 = Image().split(im1)
r2, g2, b2, a2 = Image().split(im2)
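    # Composite each image with an all-zero canvas of the same shape, masked by its own alpha channel.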
alphacomp = np.zeros(im1.shape, dtype=im1.dtype)
im3 = composite(alphacomp, im1, a1)
alphacomp = np.zeros(im2.shape, dtype=im2.dtype)
im4 = composite(alphacomp, im2, a2)
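    # Blend the two masked results with equal weight (0.5).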
return blend(im3, im4, 0.5)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _blend(img1, img2, alpha):\n return img1.mul(alpha).add(1 - alpha, img2)",
"def overlay_two_imgs(img1, img2, alpha=0.5):\n # Validate alpha\n if alpha > 1 or alpha < 0:\n fatal_error(\"The value of alpha should be in the range of (0,1)!\")\n\n # Validate image sizes are the same\n size_img1 = img1.shape[0:2]\n size_img2 = img2.shape[0:2]\n if size_img1 != size_img2:\n fatal_error(f\"The height/width of img1 ({size_img1}) needs to match img2 ({size_img2}).\")\n\n # Copy the input images\n img1_ = np.copy(img1)\n img2_ = np.copy(img2)\n # If the images are grayscale convert to BGR\n if len(img1_.shape) == 2:\n img1_ = cv2.cvtColor(img1_, cv2.COLOR_GRAY2BGR)\n if len(img2_.shape) == 2:\n img2_ = cv2.cvtColor(img2_, cv2.COLOR_GRAY2BGR)\n\n # initialize the output image\n out_img = np.zeros(size_img1 + (3,), dtype=np.uint8)\n\n # blending\n out_img[:, :, :] = (alpha * img1_[:, :, :]) + ((1 - alpha) * img2_[:, :, :])\n\n params.device += 1\n if params.debug == 'print':\n print_image(out_img, os.path.join(params.debug_outdir, str(params.device) + '_overlay.png'))\n elif params.debug == 'plot':\n plot_image(out_img)\n return out_img",
"def alpha_blending(im1, im2, window_size=0.5):\n assert(im1.shape == im2.shape)\n\n columns = im1.shape[1]\n rows = im1.shape[0]\n transition_size = int(columns * window_size)\n im1_size = (columns - transition_size) // 2\n im2_size = columns - transition_size - im1_size\n \n # alpha is a matrix which describes how much of im1 we want to display\n alpha = np.concatenate((np.ones((im1_size)), np.linspace(1, 0, transition_size), np.zeros((im2_size))))\n \n new_im = im1.copy()\n for x in range(rows):\n # Calculates Iblend(x,y) =α(x,y)Ileft(x,y) + (1−α(x,y))Iright(x,y)\n new_im[x] = (im1[x] * alpha[:, None]) + ((np.ones([columns]) - alpha)[:, None] * im2[x])\n\n return new_im",
"def overlay_alpha_images(img1, img2, keepalpha=True, dtype=np.float32,\n impl='inplace'):\n rgb1, alpha1 = _prep_rgb_alpha(img1, dtype=dtype)\n rgb2, alpha2 = _prep_rgb_alpha(img2, dtype=dtype)\n\n # Perform the core alpha blending algorithm\n if impl == 'simple':\n rgb3, alpha3 = _alpha_blend_simple(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'inplace':\n rgb3, alpha3 = _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'numexpr1':\n rgb3, alpha3 = _alpha_blend_numexpr1(rgb1, alpha1, rgb2, alpha2)\n elif impl == 'numexpr2':\n rgb3, alpha3 = _alpha_blend_numexpr2(rgb1, alpha1, rgb2, alpha2)\n else:\n raise ValueError('unknown impl={}'.format(impl))\n\n if keepalpha:\n raster = np.dstack([rgb3, alpha3[..., None]])\n # Note: if we want to output a 255 img we could do something like this\n # out = np.zeros_like(img1)\n # out[..., :3] = rgb3\n # out[..., 3] = alpha3\n else:\n raster = rgb3\n return raster",
"def hard_blending(im1, im2):\n assert(im1.shape == im2.shape)\n h, w, c = im1.shape\n new_im = im2.copy()\n new_im[:,:(w//2),:] = im1[:,:(w//2),:]\n return new_im",
"def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):\r\n\r\n if not isinstance(source, (list, tuple)):\r\n raise ValueError(\"Source must be a tuple\")\r\n if not isinstance(dest, (list, tuple)):\r\n raise ValueError(\"Destination must be a tuple\")\r\n if not len(source) in (2, 4):\r\n raise ValueError(\"Source must be a 2 or 4-tuple\")\r\n if not len(dest) == 2:\r\n raise ValueError(\"Destination must be a 2-tuple\")\r\n if min(source) < 0:\r\n raise ValueError(\"Source must be non-negative\")\r\n if min(dest) < 0:\r\n raise ValueError(\"Destination must be non-negative\")\r\n\r\n channels, depth = self._get_channels_and_depth(im)\r\n _mode = self._get_mode(im.shape, im.dtype)\r\n _im = self._new(_mode, (im.shape[1], im.shape[0]))\r\n if len(source) == 2:\r\n source = source + _im.size\r\n\r\n # over image, crop if it's not the whole thing.\r\n if source == (0, 0) + _im.size:\r\n overlay = _im\r\n else:\r\n overlay = _im.crop(source)\r\n\r\n # target for the paste\r\n box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)\r\n\r\n # destination image. don't copy if we're using the whole image.\r\n if box == (0, 0) + self.size:\r\n background = self._instance\r\n else:\r\n background = self.crop(box)\r\n\r\n result = alpha_composite(background, overlay)\r\n self.paste(result, box)",
"def _alpha_blend_inplace(rgb1, alpha1, rgb2, alpha2):\n rgb3 = np.empty_like(rgb1)\n temp_rgb = np.empty_like(rgb1)\n alpha3 = np.empty_like(alpha1)\n temp_alpha = np.empty_like(alpha1)\n\n # hold (1 - alpha1)\n np.subtract(1, alpha1, out=temp_alpha)\n\n # alpha3\n np.copyto(dst=alpha3, src=temp_alpha)\n np.multiply(alpha2, alpha3, out=alpha3)\n np.add(alpha1, alpha3, out=alpha3)\n\n # numer1\n np.multiply(rgb1, alpha1[..., None], out=rgb3)\n\n # numer2\n np.multiply(alpha2, temp_alpha, out=temp_alpha)\n np.multiply(rgb2, temp_alpha[..., None], out=temp_rgb)\n\n # (numer1 + numer2)\n np.add(rgb3, temp_rgb, out=rgb3)\n\n # removing errstate is actually a significant speedup\n with np.errstate(invalid='ignore'):\n np.divide(rgb3, alpha3[..., None], out=rgb3)\n if not np.all(alpha3):\n rgb3[alpha3 == 0] = 0\n return rgb3, alpha3",
"def _crossing_over(self, img_ext_1, img_ext_2) -> ExtendedImage:\n # Copy first extended image\n new_member = img_ext_1.img.copy()\n height = img_ext_2.get_height()\n\n # Add the right half of the 2nd image to copy of the 1st image\n new_member[0:, (height // 2):, :3] = img_ext_2.img[0:, (height // 2):, :3]\n return ExtendedImage(new_member)",
"def _alpha_blend_simple(rgb1, alpha1, rgb2, alpha2):\n c_alpha1 = (1.0 - alpha1)\n alpha3 = alpha1 + alpha2 * c_alpha1\n\n numer1 = (rgb1 * alpha1[..., None])\n numer2 = (rgb2 * (alpha2 * c_alpha1)[..., None])\n with np.errstate(invalid='ignore'):\n rgb3 = (numer1 + numer2) / alpha3[..., None]\n rgb3[alpha3 == 0] = 0\n return rgb3, alpha3",
"def merge_rgba_cv2(front_img, back_img):\n assert front_img.shape == back_img.shape\n if front_img.dtype == np.uint8:\n front_img = front_img.astype(np.float32) / 255.0\n if back_img.dtype == np.uint8:\n back_img = back_img.astype(np.float32) / 255.0\n result_img = np.zeros(front_img.shape, dtype=np.float32)\n result_img[:, :, 3] = front_img[:, :, 3] + back_img[:, :, 3] * (1 - front_img[:, :, 3])\n result_img[:, :, :3] = (front_img[:, :, :3] * front_img[:, :, 3:] +\n back_img[:, :, :3] * back_img[:, :, 3:] * (1 - front_img[:, :, 3:])) /\\\n result_img[:, :, 3:]\n result_img = (result_img * 255.0).astype(np.uint8)\n return result_img",
"def overlay(self, img2_path=\"./hurr.png\"):\n img2 = self.imread(img2_path)\n\n self.img = self.overlay_transparent(self.img, img2, 0, 0)\n\n self.edits.append(f\"overlay:{os.path.basename(img2_path)}\")\n return self",
"def overlay_image_alpha(self,img, img_overlay, pos, alpha_mask):\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])",
"def blend(ch1, ch2):\n if ch1.mode != \"LA\" or ch2.mode != \"LA\":\n raise ValueError(\"Images must be in LA\")\n src = ch2\n dst = ch1\n outa = src.channels[1] + dst.channels[1] * (1 - src.channels[1])\n dst.channels[0] = (src.channels[0] * src.channels[1] +\n dst.channels[0] * dst.channels[1] *\n (1 - src.channels[1])) / outa\n dst.channels[0][outa == 0] = 0\n dst.channels[1] = outa",
"def image_overlay(image, image_blend, alpha=0.2, cmap_image=\"Greys_r\", cmap_blend=\"jet\"):\n plt.imshow(image, cmap=cmap_image)\n plt.imshow(image_blend, cmap=cmap_blend, interpolation=\"none\", alpha=alpha)",
"def alpha_composite(front, back):\n front = np.asarray(front)\n back = np.asarray(back)\n result = np.empty(front.shape, dtype='float')\n alpha = np.index_exp[:, :, 3:]\n rgb = np.index_exp[:, :, :3]\n falpha = front[alpha] / 255.0\n balpha = back[alpha] / 255.0\n result[alpha] = falpha + balpha * (1 - falpha)\n old_setting = np.seterr(invalid='ignore')\n result[rgb] = (front[rgb] * falpha + back[rgb] * balpha * (1 - falpha)) / result[alpha]\n np.seterr(**old_setting)\n result[alpha] *= 255\n np.clip(result, 0, 255)\n # astype('uint8') maps np.nan and np.inf to 0\n result = result.astype('uint8')\n result = Image.fromarray(result, 'RGBA')\n return result",
"def get_concat_h(im1, im2):\n dst = Image.new('RGB', (im1.width + im2.width, im1.height))\n dst.paste(im1, (0, 0))\n dst.paste(im2, (im1.width, 0))\n return dst",
"def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3",
"def alpha_composite_with_color(image, color=(255, 255, 255)):\n back = Image.new('RGBA', size=image.size, color=color + (255,))\n return alpha_composite(image, back)",
"def pyrBlend(img_1: np.ndarray, img_2: np.ndarray, mask: np.ndarray, levels: int) -> (np.ndarray, np.ndarray):\r\n img_1_lap = laplaceianReduce(img_1, levels)\r\n img_2_lap = laplaceianReduce(img_2, levels)\r\n mask_gauss = gaussianPyr(mask)\r\n\r\n merge = (img_1_lap[levels - 1] * mask_gauss[levels - 1]) + ((1 - mask_gauss[levels - 1]) * img_2_lap[levels - 1])\r\n gaussian = gaussianKer(5)\r\n for i in range(levels - 2, -1, -1):\r\n merge = gaussExpand(merge, gaussian)\r\n merge = merge + (img_1_lap[i] * mask_gauss[i]) + ((1 - mask_gauss[i]) * img_2_lap[i])\r\n\r\n img_1 = cropPic(img_1, levels)\r\n img_2 = cropPic(img_2, levels)\r\n naive = (img_1 * mask_gauss[0]) + ((1 - mask_gauss[0]) * img_2)\r\n\r\n\r\n return naive, merge",
"def pyrBlend(img_1: np.ndarray, img_2: np.ndarray, mask: np.ndarray, levels: int) -> (np.ndarray, np.ndarray):\r\n naive_blend = img_1 * mask + img_2 * (1 - mask)\r\n img_1_lp = laplaceianReduce(img_1, levels)\r\n img_2_lp = laplaceianReduce(img_2, levels)\r\n mask_lp = gaussianPyr(mask, levels)\r\n img_2_lp.reverse()\r\n img_1_lp.reverse()\r\n mask_lp.reverse()\r\n r_imgs = []\r\n for i in range(0, len(img_2_lp)):\r\n new_img = mask_lp[i] * img_1_lp[i] + (1-mask_lp[i]) * img_2_lp[i]\r\n r_imgs.append(new_img)\r\n r_imgs.reverse()\r\n\r\n return naive_blend, laplaceianExpand(r_imgs)",
"def overlay_im_to_background(im_back, im_over, x_offset, y_offset):\n y1, y2 = y_offset, y_offset + im_over.shape[0]\n x1, x2 = x_offset, x_offset + im_over.shape[1]\n\n alpha_s = im_over[:, :, 3] / 255.0\n alpha_l = 1.0 - alpha_s\n\n for c in range(0, 3):\n im_back[y1:y2, x1:x2, c] = (alpha_s * im_over[:, :, c] +\n alpha_l * im_back[y1:y2, x1:x2, c])",
"def zover(input_a, input_b):\n zcomp = input_b.duplicate()\n\n ImageBufAlgo.zover(zcomp, input_a, input_b)\n\n if zcomp.has_error:\n print \"Error merging zover:\", zcomp.geterror()\n\n return zcomp",
"def alpha_blend(input_image, segmentation_mask, alpha=0.5):\n blended = np.zeros(input_image.size, dtype=np.float32)\n blended = input_image * alpha + segmentation_mask * (1 - alpha)\n return blended",
"def blend_images(primary_image, secondary_image, alpha, saturation_enhance,\n contrast_enhance):\n # TODO: remove colors of blended image\n im_primary = Image.open(primary_image)\n # im_secondary = Image.open(secondary_image)\n\n resized_secondary_image = resize_secondary_image(primary_image,\n secondary_image)\n\n # TODO add a smarter way to change color saturation of single images\n saturation = ImageEnhance.Color(resized_secondary_image)\n resized_secondary_image = saturation.enhance(0.0)\n blended_image = Image.blend(im_primary, resized_secondary_image, alpha)\n\n # Change saturation and contrast of image\n saturation = ImageEnhance.Color(blended_image)\n contrast = ImageEnhance.Contrast(blended_image)\n\n blended_image = saturation.enhance(saturation_enhance)\n blended_image = contrast.enhance(contrast_enhance)\n\n return blended_image",
"def combine(imA, imB):\n # check image sizes\n if imA.size != imB.size:\n raise Exception(\"cannot combine two images with different sizes\")\n \n height, width = imA.size\n total_width = width * 2\n imC = Image.new('RGB', (total_width, height))\n x_offset = 0\n for im in [imA, imB]:\n imC.paste(im, (x_offset, 0))\n x_offset += im.size[1]\n return imC",
"def over(input_a, input_b):\n\n comp = input_b.duplicate()\n input_a.premult()\n ImageBufAlgo.over(comp, input_a, input_b)\n\n if comp.has_error:\n print \"Error merging over:\", comp.geterror()\n\n return comp",
"def _alpha_blend_numexpr1(rgb1, alpha1, rgb2, alpha2):\n import numexpr\n alpha1_ = alpha1[..., None] # NOQA\n alpha2_ = alpha2[..., None] # NOQA\n alpha3 = numexpr.evaluate('alpha1 + alpha2 * (1.0 - alpha1)')\n alpha3_ = alpha3[..., None] # NOQA\n rgb3 = numexpr.evaluate('((rgb1 * alpha1_) + (rgb2 * alpha2_ * (1.0 - alpha1_))) / alpha3_')\n rgb3[alpha3 == 0] = 0",
"def paste(self, other):\n r, g, b, alpha = other.pil_image.split()\n pil_image = self.pil_image.copy()\n pil_image.paste(other.pil_image, mask=alpha)\n return kurt.Image(pil_image)",
"def transform_images(img1,img2):",
"def PImageAdd (in1Image, in2Image, outImage, err, \\\n chkPos=False, factor1=1.0, factor2=1.0):\n ################################################################\n # Checks\n if not Image.PIsA(in1Image):\n raise TypeError,\"in1Image MUST be a Python Obit Image\"\n if not Image.PIsA(in2Image):\n raise TypeError,\"in2Image MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n #\n # Clone output from input 1\n in1Image.Clone (outImage, err)\n # Open images\n Image.POpen (in1Image, Image.READONLY, err)\n Image.POpen (in2Image, Image.READONLY, err)\n Image.POpen (outImage, Image.WRITEONLY, err)\n # Get input descriptor to see how many planes\n in1Desc = in1Image.Desc\n in2Desc = in2Image.Desc\n # Check compatibility\n ImageDesc.PCheckCompat (in1Desc, in2Desc, chkPos=chkPos)\n inDescDict = in1Desc.Dict\n ndim = inDescDict[\"naxis\"]\n inNaxis = inDescDict[\"inaxes\"]\n # Work buffer\n inImageArray = Image.PGetFArray(in1Image)\n ImageBuffer1 = FArray.PCopy(inImageArray, err)\n ImageBuffer2 = FArray.PCopy(inImageArray, err)\n\n # list of planes to loop over (0-rel)\n if (ndim>0) and (inNaxis[2]>0): \n planes = range(inNaxis[2])\n else:\n planes = [0]\n \n # Loop over planes\n for iPlane in planes:\n doPlane = [iPlane+1,1,1,1,1]\n # Get image planes\n Image.PGetPlane (in1Image, ImageBuffer1, doPlane, err)\n Image.PGetPlane (in2Image, ImageBuffer2, doPlane, err)\n\n # Scale\n FArray.PSMul(ImageBuffer1, factor1)\n FArray.PSMul(ImageBuffer2, factor2)\n\n # Add\n FArray.PAdd(ImageBuffer1, ImageBuffer2, ImageBuffer2)\n\n # Write output\n Image.PPutPlane (outImage, ImageBuffer2, doPlane, err)\n\n # end loop over planes\n # Close\n in2Image.Close(err)\n in2Image.Close(err)\n outImage.Close(err)\n # Error?\n if err.isErr:\n OErr.printErrMsg(err, \"Error subtracting Images\")\n # Write history\n in1History = History.History(\"history\", in1Image.List, err)\n in2History = History.History(\"history\", in2Image.List, err)\n outHistory = History.History(\"history\", outImage.List, err)\n # Copy Histories\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1, \"/ PImageAdd Input 1 History\",err)\n outHistory.Close(err)\n info = in1Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in1History, outHistory, err)\n #Not needed History.PCopy(in1History, outHistory, err)\n outHistory.Open(History.READWRITE, err)\n outHistory.WriteRec(-1, \"/ \",err)\n outHistory.WriteRec(-1, \"/ ****** PImageAdd Input 2 History\",err)\n outHistory.Close(err)\n info = in2Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in2History, outHistory, err)\n History.PCopy(in2History, outHistory, err)\n # Add this programs history\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor1 = \"+str(factor1),err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor2 = \"+str(factor2),err)\n outHistory.Close(err)"
] |
[
"0.7826415",
"0.76921284",
"0.7499772",
"0.73837155",
"0.71249557",
"0.6970056",
"0.69635725",
"0.6951903",
"0.68631494",
"0.6629562",
"0.65333754",
"0.652154",
"0.6500405",
"0.6471415",
"0.63709354",
"0.636076",
"0.6339621",
"0.63121444",
"0.62808",
"0.62233764",
"0.62221324",
"0.61766726",
"0.6174878",
"0.61382633",
"0.60920644",
"0.6062207",
"0.6046536",
"0.604353",
"0.6039521",
"0.6035181"
] |
0.85184
|
0
|
Take a question file and return a list of `Question` objects.
|
def get_questions(qn_filepath):
array = []
with open(qn_filepath, 'rb') as file:
for line in file:
array.append(line)
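    # Drop the markup and blank lines that carry no question content.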
delete = ['<top>\r\n', '\r\n', '<desc> Description:\r\n', '</top>\r\n']
for i in delete:
array = list(filter((i).__ne__, array))
final_array = []
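    # Remaining lines alternate between a question-id line and its question text; parse each pair into a Question.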
for i in range(len(array)):
if i % 2 == 0:
array[i] = array[i].split(' ')[-1].strip()
array[i+1] = array[i+1].strip().split('?')[0]
qn = Question(qn_id=int(array[i]), qn_text=array[i+1])
final_array.append(qn)
return final_array
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_questions(file_name='questions.csv'):\n questions = []\n\n with open(file_name) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\n for index, row in enumerate(csvreader):\n questions.append(Question(int(row[0]),\n row[1],\n row[2],\n row[3],\n row[4],\n row[5],\n row[6]))\n\n return questions",
"def from_text(question_file, answer_file):\n with open(question_file, 'r', encoding='UTF8', errors='replace') as f:\n questions = f.readlines()\n\n with open(answer_file, 'r', encoding='UTF8', errors='replace') as f:\n answers = f.readlines()\n\n return QuestionSet([Question(question, answer) for question, answer in zip(questions, answers)])",
"def read_dataFile(fileName):\n try:\n questionList = []\n prompt = None\n with open(fileName, 'r') as f:\n for i in f.readlines():\n i = i.strip('\\n')\n question = {}\n if len(i.split(',')) == 1:\n if i != prompt:\n prompt = i\n else:\n question['prompt'] = prompt\n question['flag'] = False # Has the question been answered?\n question['title'] = i.split(',')[0]\n if len(i.split(',')) > 2:\n question['answer'] = list(i.split(',')[1:])\n question['multiple_answers'] = True # There are multiple answers to the question.\n else:\n question['answer'] = i.split(',')[1]\n question['multiple_answers'] = False\n if question:\n questionList.append(question)\n return questionList\n except Exception:\n print('Error reading data file')",
"def parse_game_data(file_name : str) -> List[Topic]:\n # Opens file.\n data = open(file_name, 'r')\n\n # Sets up variables.\n current_topic = None\n parsed_data = []\n\n # Loops over each line in the file.\n for line in data:\n \n # Creates new topic if there is none.\n if current_topic is None:\n current_topic = Topic(line[:-1])\n\n # Saves topic if encountered empty line.\n elif line == '\\n':\n parsed_data.append(current_topic)\n current_topic = None\n\n # Parses question line and adds to topic.\n else:\n question, answer, points = parse_line(line)\n new_question = Question(question, answer, points)\n current_topic.questions.append(new_question)\n\n return parsed_data",
"def read_quiz_questions() -> pd.DataFrame:\n data_path = './data/quiz_question_data.txt'\n if not os.path.exists(data_path):\n raise Exception(f'Data not found in {data_path}')\n\n with open(data_path, 'r') as f:\n questions = f.read()\n\n return pd.DataFrame(ast.literal_eval(questions))",
"def read_analogies_file(eval_file='questions-words.txt', word2id={}):\n questions = []\n questions_skipped = 0\n with open(eval_file, \"rb\") as analogy_f:\n for line in analogy_f:\n if line.startswith(b\":\"): # Skip comments.\n continue\n words = line.strip().lower().split(b\" \") # lowercase\n ids = [word2id.get(w.strip()) for w in words]\n if None in ids or len(ids) != 4:\n questions_skipped += 1\n else:\n questions.append(np.array(ids))\n print(\"Eval analogy file: \", eval_file)\n print(\"Questions: \", len(questions))\n print(\"Skipped: \", questions_skipped)\n analogy_questions = np.array(questions, dtype=np.int32)\n return analogy_questions",
"def read_in_file(self):\n try: # we are opening the file, this could fail..\n for line in open(self.question_file, 'r').readlines(): # Open the file and read in all the lines and put them in an array\n if line == '\\n': # if the line is simply equal to \"\\n\"\n continue # \"continue\" means \"don't continue execution, go back to the top of the loop\n else: # the line simply isn't \"\\n\" so let's append it.\n self.question_data.append(line.rstrip()) # append the line to the self.question_data array, strip the \\n off\n except OSError as err: # Let's capture the exception catch\n print(\"Problem opening question file: %s\" % self.question_file)\n fatal(\"System Error {0}\".format(err), -1) # let's print FATAL and the actual exception catch msg and exit -1",
"def _read_file(self, input_file):\n with io.open(input_file, \"r\", encoding=\"UTF-8\") as file:\n examples = []\n for line in file:\n data = line.strip().split(\"_!_\")\n example = InputExample(\n guid=data[0], label=data[1], text_a=data[3])\n examples.append(example)\n\n return examples",
"def readQrels(fileName):\n ln = 0\n res = []\n\n with open(fileName) as f:\n for line in tqdm(f, desc='loading qrels (by line)', leave=False):\n ln += 1\n line = line.strip()\n if not line:\n continue\n try:\n e = parseQrelEntry(line)\n res.append(e)\n except:\n raise Exception('Error parsing QRELs in line: %d' % ln)\n\n return res",
"def read_input_files(input_file: str) -> list[Food]:\n with open(input_file) as input_fobj:\n foods = [Food.from_raw(line.strip()) for line in input_fobj]\n return foods",
"def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]",
"def _read_analogies(self):\n questions = []\n questions_skipped = 0\n with open(self._options.eval_data, \"rb\") as analogy_f:\n for line in analogy_f:\n if line.startswith(\":\"): # Skip comments.\n continue\n words = line.strip().lower().split(\" \")\n # print words\n ids = [self._cate2id.get(w.strip()) for w in words]\n # print ids\n if None in ids or len(ids) != 4:\n questions_skipped += 1\n else:\n questions.append(np.array(ids))\n print(\"Eval analogy file: \", self._options.eval_data)\n print(\"Questions: \", len(questions))\n print(\"Skipped: \", questions_skipped)\n questions = np.array(questions, dtype=np.int32)\n self._analogy_questions = questions\n self._target_field = np.array(\n list(set(questions[:, 3])), dtype=np.int32)\n np.random.shuffle(self._analogy_questions)",
"def read_quiz_json(self):\n\n quiz_file_path = QUIZ_FILE_DIR + QUIZ_FILE_NAME\n quiz_json_file = open(quiz_file_path)\n quiz_json_string = quiz_json_file.read()\n quiz_json_file.close()\n question_list = json.loads(quiz_json_string)\n return(question_list)",
"def __init__(self, file, question_list, encoding, read_file=True, load_questions=True, verbose=True):\n self.file = file\n self.question_list = question_list\n self.encoding = encoding\n\n self.data = pd.DataFrame\n\n if read_file:\n self.read_file()\n\n if load_questions:\n self.load_questions(verbose)",
"def load_templates(self):\n\n self.templates = []\n\n if os.path.exists(\"question_templates.txt\"):\n for line in open(\"question_templates.txt\", \"r\"):\n self.templates.append(line.replace(\"\\n\", \"\"))",
"def examples_from_file(path):\n examples = []\n\n # count total lines before loading\n total_lines = int(local('wc -l {}'.format(path), capture=True).split()[0])\n\n with codecs.open(path, 'r', encoding='utf-8') as f:\n for line in verboserate(f, desc='Reading data file.', total=total_lines):\n src, trg = line.strip().lower().split('\\t')\n src_words = src.split(' ')\n trg_words = trg.split(' ')\n assert len(src_words) > 0\n assert len(trg_words) > 0\n\n if use_diff:\n ex = EditExample.salient_diff(src_words, trg_words, free_set)\n else:\n ex = EditExample.whitelist_blacklist(src_words, trg_words)\n examples.append(ex)\n return examples",
"def parse_problem(path_to_file):\n with open(path_to_file, 'r') as f:\n lines = f.readlines()\n return parse_problem_lines(lines)",
"def getUsers(users_file):\n user_names = tuple(open(users_file, 'r'));\n for user_name in user_names:\n clean_user_name = user_name.rstrip(\"\\n\")\n listQuestions(clean_user_name)",
"def deserialize(path):\n with open(path, 'rb') as f:\n temp = pickle.load(f)\n for q in temp.questions:\n q.on_deserialize()\n return temp",
"def read_file(path: str) -> Iterator[Problem]:\n with open(path) as f:\n txt = f.read()\n\n for encoded_problem in txt.split('\\n\\n'):\n yield parse_alpha_encoding(encoded_problem)",
"def read_answers(path: str):\n if not os.path.exists(path):\n raise FileNotFoundError(\"file {} does not exists\".format(path))\n\n collection = {}\n with open(path, 'r') as file:\n for l in file.readlines():\n split = l.split(' ')\n index, doc_id = int(split[0]), int(split[1])\n if not collection.get(index):\n collection[index] = [doc_id]\n else:\n collection[index] += [doc_id]\n\n return collection",
"def load_questions(self, verbose=True):\n for question in self.question_list:\n question.load_question(self.data)",
"def load_problems(filename):\n problems = []\n f = open(filename, 'r')\n while 1:\n try:\n coins = f.readline()\n if coins:\n coins = [int(num) for num in coins.replace('[', '')\n .replace(']', '')\n .replace(' ', '')\n .split(',') if num not in '\\n']\n else:\n break\n change = f.readline()\n if change:\n change = change.replace('\\n', '')\n change = int(change)\n else:\n break\n except Exception:\n break\n\n problems.append((coins, change))\n\n return problems",
"def parse_question_data(self):\n section = ''\n subsection = ''\n quest = ''\n # The data falls into 4 cases\n # 1. Sections\n # 2. subsections\n # 3. questions\n # 4. answers.\n\n for line in self.question_data: \n\n if \":\" in line: # case #2\n subsection = line.split(\":\")[1] # split the line on the : into an array but only take the [1] element\n debug(\"Subsection: %s\" % subsection)\n \n elif \".\" in line: # this is either a question or an answer?\n \n if line.split(\".\")[0].isdigit(): # case #3 it's a question, split on . into an array and take the element to the left and ask if it's a digit.\n quest = line # Since we know it's something like \"3. Are you a warlock?\" we stick that in the quest varable.\n debug(\"Question: %s\" % quest)\n # Create a question object and stick it in the dictonary with the key being the question (since we know it'll be unique)\n self.questions[quest] = question(section, subsection, quest) # I know it's redundant to have the key and have a value.\n \n elif line.startswith(\".\"): # case #4 answer All the answers startswith \".\" \n debug(\"Answer: %s\" % line)\n # take the question and append it to the answers array in the question object.\n self.questions[quest].answers.append(line[2:]) # Trim the first two characters off the answer since it's \". the answer\"\n \n else: # case #1 # This is section like AMERICAN DEMOCRACY\n section = line # load the line from the file into the section variable\n debug(\"Section = %s\" % section)",
"def deserialize_from_file(self):\n\n # We cannot read from a file unless the user provides it in the\n # constructor.\n if not self._input_file:\n raise Exception(\"No input file provided to deserialize from.\")\n\n # Build a record list from the file contents.\n records = []\n for record in self.deserialize_next_from_file():\n records.append(record)\n\n self.close_file_deserializer()\n\n return records",
"def getQuestions(self, CSVfile, week): #create the list of questions for this week\n questions = []\n RawData = csv.reader(open(self.CSVfile, newline=''))\n qLine = RawData.__next__()\n for question in qLine[0:-1]:\n questions.append(Question(question, \n qLine.index(question),\n CSVfile,\n week))\n self.questions = questions",
"def read_answer(filename):\n import shlex\n listPts = []\n\n filename = \"Data/{}.out\".format(filename)\n\n with open(filename, 'r') as data:\n answers = data.readline()\n answers = \",\".join(shlex.split(answers))\n answers = answers.split(\",\")\n for i in answers:\n listPts.append(int(i))\n\n return listPts",
"def test_reader(qn_filepath, answers_dirpath):\n qns = get_questions(qn_filepath)\n for qn in qns:\n if qn.qid == 100:\n q = qn\n break\n assert q\n docs = get_documents(answers_dirpath, q.qid)\n print docs\n print docs[0].content",
"def start_test(n_questions, questions_file):\r\n\tfile = open(\"questions/\"+questions_file, \"r\")\r\n\tc = 0\r\n\tcorrect = []\r\n\twrong = []\r\n\r\n\tfor i in range(3325):\r\n\t\tline = file.readline()\r\n\t\t\r\n\t\tif \"Answer: \" in line:\r\n\t\t\tc += 1\r\n\t\t\tif len(str(line[8:]).strip()) > 1:\r\n\t\t\t\tprint(\"\\nMultiple Choice\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"\\nSingle Choice\")\r\n\t\t\tansw = str(input(\"\\nAnswer: \")).upper()\r\n\t\t\tif answ == str(line[8:]).strip():\r\n\t\t\t\tcorrect.append(c)\r\n\t\t\telse:\r\n\t\t\t\twrong.append(c)\r\n\t\t\t\tprint(\"Correct answer: \", str(line[8:]))\r\n\t\t\tprint(\"\\n\",\"-\"*10, \"Correct: \", len(correct), \"-\"*10, \"\\n\", \"-\"*10, \"Wrong: \", len(wrong), \"-\"*10 )\r\n\t\t\tcontinue\r\n\r\n\t\tif c == n_questions:\r\n\t\t\tbreak\r\n\t\tprint(line)\r\n\r\n\tdict_result = {\"Corrects\":correct,\r\n\t\t\t\t\t\"Wrongs\":wrong,\r\n\t\t\t\t\t\"n_questions\":n_questions}\r\n\r\n\tfile.close()\r\n\r\n\treturn dict_result",
"def read_examples(file_name):\n start = time.time()\n print 'Reading examples from tab separated file...'\n count = 0\n i = 0\n with open(file_name, 'r') as fp:\n relation_examples = []\n for i, line in enumerate(fp):\n line.strip()\n if len(line) == 0 or len(line.split()) == 0:\n raise IOError\n else:\n fields = line.split('\\t')\n assert len(fields) == 9, \"a problem with the file format (# fields is wrong) len is \" + str(len(fields)) + \"instead of 9\"\n relation_examples.append([str(count)] + fields)\n count += 1\n print ' File contained {} lines'.format(i + 1)\n print ' Datapoints with valid features encoded: {}'.format(count)\n print ' Done in {:.2f} sec'.format(time.time() - start)\n return relation_examples"
] |
[
"0.7246751",
"0.7241736",
"0.68975824",
"0.6749347",
"0.64885396",
"0.6372762",
"0.6370283",
"0.6272366",
"0.6198504",
"0.6065089",
"0.6028565",
"0.5912043",
"0.58960605",
"0.5824803",
"0.5816058",
"0.57964784",
"0.57829934",
"0.5780258",
"0.57793343",
"0.575813",
"0.57577366",
"0.5754807",
"0.5717843",
"0.56986225",
"0.569326",
"0.5688286",
"0.5654515",
"0.5654036",
"0.5634098",
"0.56135184"
] |
0.7270864
|
0
|
Given the question id and the path to answers for all questions, return the list of documents that contain answers.
|
def get_documents(path_to_dir, qn_id):
files_to_question = []
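    # Answers for this question live in a sub-directory named after the question id.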
directory = os.path.join(path_to_dir, str(qn_id))
filenames = os.listdir(directory)
filenames.sort(key=int)
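    # Filenames are numeric document ids; tokenize each file and skip any that yield no sentences.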
for filename in filenames:
doc_name, _ = os.path.splitext(filename)
document_filepath = os.path.join(directory, filename)
with open(document_filepath, 'rb') as subfile:
subfile_data = subfile.readlines()
# Use tried-and-tested tokenizer code from P1...
tokenized_sentences = string_to_tokens(subfile_data)
if not tokenized_sentences:
continue
doc = Document(doc_id=int(doc_name), qn_id=qn_id, content=tokenized_sentences)
files_to_question.append(doc)
return files_to_question
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_answers(path: str):\n if not os.path.exists(path):\n raise FileNotFoundError(\"file {} does not exists\".format(path))\n\n collection = {}\n with open(path, 'r') as file:\n for l in file.readlines():\n split = l.split(' ')\n index, doc_id = int(split[0]), int(split[1])\n if not collection.get(index):\n collection[index] = [doc_id]\n else:\n collection[index] += [doc_id]\n\n return collection",
"def find_paths(documents: List[str], question: str, candidate: str,\n style='wikihop') -> Optional[List]:\n sentlimit = 1\n nearest_only = False\n d = process_data(documents, question, candidate)\n\n doc_ners = d['docners']\n doc_postags = d['docpostags']\n doc_sents = d['docsents']\n\n qpos = d[\"qpos\"]\n qner = d[\"qner\"]\n qlemma = d['qlemma']\n rel = qlemma[0]\n entity = ' '.join(qlemma[1:]).lower()\n candidates = []\n orig_candidates = [d['candidate']]\n for ctoks in orig_candidates:\n sctoks = [stemmer.stem(ca) for ca in ctoks]\n if sctoks in candidates:\n candidates.append(ctoks)\n else:\n candidates.append(sctoks)\n candidates = [' '.join(cand) for cand in candidates]\n candpos = [d['cpos']]\n candner = [d['cner']]\n\n doc_sents_lemma = lemmatize_docsents(doc_sents, stem)\n\n if style.strip().lower() == \"wikihop\":\n pf = PathFinder(\"qid\", doc_sents_lemma,\n entity, rel,\n candidates,\n answer=None,\n sentlimit=sentlimit,\n nearest_only=nearest_only)\n else:\n pf = ObqaPathFinder(\"qid\", doc_sents_lemma,\n qlemma, qpos, qner,\n candidates, candpos, candner,\n answer=None, sentlimit=sentlimit,\n nearest_only=nearest_only)\n\n paths = pf.get_paths(doc_ners, doc_postags)\n if len(paths) == 0:\n print(\"No Paths Found !!\")\n return None\n # pathdict = {\"id\": \"qid\", \"pathlist\": paths[list(paths.keys())[0]]}\n return paths[list(paths.keys())[0]]",
"def find_answers_to_a_question(list_name, question_id):\n\n my_items = [element for element in list_name if element[\n 'question_id'] == question_id]\n\n if my_items:\n return my_items\n return False",
"def findall_path_from_org_id(self, path, org_id):\n for org_question in self.merged_root.iter('OrgQuestion'):\n if org_question.attrib['ORGQ_ID'] == org_id:\n extraction = org_question.findall(path)\n if len(extraction) != 0:\n return extraction\n\n return list()",
"def answers_all(self):\n return self.answer_set.all()",
"def answers(self):\n from quiz.models import Answer\n qids = self.values_list('id', flat=True)\n return Answer.objects.filter(\n question__id__in=qids).select_related('question')",
"def get_answers(question_id, api_site_parameter, rpc = None, page = 1, body = False, comments = False, pagesize = 100, sort = 'votes'):\n path = \"questions/%d/answers\" % question_id\n \n query_filter = '.p-I38n'\n \n if body:\n query_filter = '-m8C*uMP-q0'\n if comments:\n query_filter = ')(Ybp0wdAN'\n if body and comments:\n query_filter = 'D9l0ZsiD'\n if pagesize == 0:\n query_filter = '!-q2Rj6nE'\n \n results = __fetch_results(path, api_site_parameter, rpc = rpc, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results",
"def test_reader(qn_filepath, answers_dirpath):\n qns = get_questions(qn_filepath)\n for qn in qns:\n if qn.qid == 100:\n q = qn\n break\n assert q\n docs = get_documents(answers_dirpath, q.qid)\n print docs\n print docs[0].content",
"def documents_dslquery(dsl_dict, **kwargs):\n return _dslquery('documents', dsl_dict, **kwargs)",
"def _retrieveQuestions(self, questID):\n all_related_quests = set([quest for doc in self._Docs\n for quest in self.doc2quest[doc]])\n random_quests = random.sample(self.questions.keys(), 40)\n\n Quests = [questID]\n Quests += [random.choice(self.doc2quest[dID]) for dID in self._Docs[1:5]]\n\n for quest in random_quests:\n\n if len(Quests) == 10:\n break\n if not quest in all_related_quests:\n Quests += quest\n\n return Quests",
"def get_collection_exercises_by_survey(survey_id):\n logger.info(\"Retrieving collection exercises\", survey_id=survey_id)\n url = f'{app.config[\"COLLECTION_EXERCISE_URL\"]}/collectionexercises/survey/{survey_id}'\n response = requests.get(url, auth=app.config[\"BASIC_AUTH\"])\n\n if response.status_code == 204:\n return []\n try:\n response.raise_for_status()\n except HTTPError:\n logger.exception(\"Failed to retrieve collection exercises by survey\", survey_id=survey_id)\n raise ApiError(response)\n\n logger.info(\"Successfully retrieved collection exercises by survey\", survey_id=survey_id)\n return response.json()",
"def get(self, question_id):\n response = Answers.get_all_answers(question_id)\n\n return response",
"def set_answers(self, answers):\n self.logger.info(\"Add answer : %s\" % answers)\n try:\n page_index = 0\n for page in self._answer_payload['pages']:\n if page['id'] == self._current_page['id']:\n break\n page_index += 1\n\n if page_index == len(self._answer_payload['pages']): # page not found\n self._answer_payload['pages'].append({\n \"id\": self._current_page['id'],\n \"questions\": []\n })\n\n question_index = 0\n for question in self._answer_payload['pages'][page_index]['questions']:\n if question['id'] == self._current_question['id']:\n break\n question_index += 1\n\n if question_index == len(self._answer_payload['pages'][page_index]['questions']): # question not found\n self._answer_payload['pages'][page_index]['questions'].append({\n \"id\": self._current_question['id']\n })\n\n _answers = []\n for answer in answers:\n _answers.append(answer)\n\n self._answer_payload['pages'][page_index]['questions'][question_index]['answers'] = _answers\n\n if self._current_question['variable_id'] is not None:\n self._answer_payload['pages'][page_index]['questions'][question_index]['variable_id'] = str(self._current_question['variable_id'])\n\n except Exception as e:\n self.logger.error(\"Error on add answer : %s\" % e)",
"def iterateAnswers(db, postIds):\n c=db.cursor()\n strPostId = \",\".join([str(postId) for postId in postIds])\n #logging.debug(\"Loading answers...\")\n c.execute(\"\"\"SELECT * FROM posts WHERE type_id=2 AND parent_id IN (%s) ORDER BY FIELD(parent_id, %s)\"\"\" % (strPostId, strPostId))\n for answer in c.fetchall():\n yield Post(answer)\n c.close()",
"def load_corpus_questions():\r\n\tglobal search_criteria_dict, solution_dict, linked_abstracts_dict\r\n\tif os.path.exists(paths.path_data_questions_pickle):\r\n\t\tprint('\\nloading questions and answers')\r\n\t\tsearch_criteria_dict = pickle.load(open(paths.path_data_questions_pickle,\"rb\"))\r\n\t\tsolution_dict = pickle.load(open(paths.path_data_answers_pickle,\"rb\"))\r\n\t\tlinked_abstracts_dict = pickle.load(open(paths.path_data_linkedabstracts_pickle,\"rb\"))\r\n\t\t\r\n\t\tprint(len(search_criteria_dict))\r\n\t\tprint(len(solution_dict))\r\n\t\tprint(len(linked_abstracts_dict))\r\n\t\t\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def answers(self):\n return self.answer_set.filter(active=True)",
"def get_answers_by_answer_id(self, answer_id):\n return self._answers_by_id.get(answer_id)",
"def query(self, words: list) -> list:\r\n relevant_documents = self.word_to_docs_mapping[words[0]]\r\n if len(words) > 1:\r\n for word in words:\r\n relevant_documents = relevant_documents & self.word_to_docs_mapping[word]\r\n return list(relevant_documents)",
"def get_answer(answer_id, api_site_parameter, body = False, comments = False, pagesize = 1):\n path = \"answers/%d\" % answer_id\n \n query_filter = ')(Y_v2R5Tz'\n \n if body:\n query_filter = '-m84pZ4-YWK'\n if comments:\n query_filter = ')(Ybxr-pC9'\n if body and comments:\n query_filter = 'D9kY06hX'\n \n results = __fetch_results(path, api_site_parameter, filter = query_filter, pagesize = pagesize)\n return results",
"def get_question_answers(self):\r\n # dict of (id, correct_answer)\r\n answer_map = dict()\r\n for response in self.responders.keys():\r\n results = self.responder_answers[response]\r\n answer_map.update(results)\r\n\r\n # include solutions from <solution>...</solution> stanzas\r\n for entry in self.tree.xpath(\"//\" + \"|//\".join(solution_tags)):\r\n answer = etree.tostring(entry)\r\n if answer:\r\n answer_map[entry.get('id')] = contextualize_text(answer, self.context)\r\n\r\n log.debug('answer_map = %s', answer_map)\r\n return answer_map",
"def find_answers(soup, answer_counter, question_counter, url, columns):\n dictionaries = []\n divs = soup.find_all(\"div\", class_=\"Answer AnswerBase\") # finds all the div tags with the answer class\n q_div = soup.find_all(\"h1\")\n for q in q_div:\n question_text = q.find(\"span\", class_=\"ui_qtext_rendered_qtext\")\n question_counter += 1\n for d in divs:\n answers = d.find_all(\"p\") # within the div tags finds all the paragraph tags so answers can be kept together\n answer_counter += 1\n all_mispelled = set()\n length = 0\n with open(str(answer_counter) + '_Experiences in life_' + str(question_counter) + \".txt\", \"w+\") as f:\n for a in answers:\n if filter_url_or_answer(a.text):\n break\n f.write(a.text) # writes each answer in a separate text file\n f.write(\"\\n\")\n mispelled, line_length = check_spelling(a.text)\n length += line_length\n all_mispelled.update(set(mispelled))\n dictionary = make_dictionary(answer_counter, question_counter, url, columns, list(all_mispelled), question_text.text, length)\n dictionaries.append(dictionary)\n return answer_counter, question_counter, dictionaries",
"def extract_data(data_path,\n excluded_contexts=DEFAULT_EXCLUDED_CONTEXTS,\n excluded_questions=DEFAULT_EXCLUDED_QUESTIONS,\n contain_answers=True):\n # opens the json file\n with open(data_path, 'r') as f:\n dataset = json.load(f)\n\n if contain_answers:\n columns = ['id', 'title', 'context', 'question', 'answer', 'start']\n else:\n columns = ['id', 'title', 'context', 'question']\n\n # stores each record in a list by exploring the levels of the json object\n samples = []\n for data in dataset['data']:\n title = data['title']\n for paragraph in data['paragraphs']:\n context = paragraph['context']\n if context not in excluded_contexts:\n for qas in paragraph['qas']:\n question = qas['question']\n if question not in excluded_questions:\n record_id = qas['id']\n if contain_answers:\n answer = qas['answers'][0]['text']\n start = qas['answers'][0]['answer_start']\n sample = [record_id, title, context, question, answer, start]\n else:\n sample = [record_id, title, context, question]\n samples.append(sample)\n # creates a dataframe from that list\n return pd.DataFrame(samples, columns=columns)",
"def load_file(p_path_to_data):\r\n\r\n all_answers = []\r\n query_ids = []\r\n no_answer_query_ids = set()\r\n with open(p_path_to_data, 'r', encoding='utf-8') as data_file:\r\n for line in data_file:\r\n try:\r\n json_object = json.loads(line)\r\n except json.JSONDecodeError:\r\n raise Exception('\\\"%s\\\" is not a valid json' % line)\r\n\r\n assert \\\r\n QUERY_ID_JSON_ID in json_object, \\\r\n '\\\"%s\\\" json does not have \\\"%s\\\" field' % \\\r\n (line, QUERY_ID_JSON_ID)\r\n query_id = json_object[QUERY_ID_JSON_ID]\r\n\r\n assert \\\r\n ANSWERS_JSON_ID in json_object, \\\r\n '\\\"%s\\\" json does not have \\\"%s\\\" field' % \\\r\n (line, ANSWERS_JSON_ID)\r\n answers = json_object[ANSWERS_JSON_ID]\r\n if 'No Answer Present.' in answers:\r\n no_answer_query_ids.add(query_id)\r\n answers = ['']\r\n all_answers.extend(answers)\r\n query_ids.extend([query_id]*len(answers))\r\n\r\n all_normalized_answers = normalize_batch(all_answers)\r\n\r\n query_id_to_answers_map = {}\r\n for i, normalized_answer in enumerate(all_normalized_answers):\r\n query_id = query_ids[i]\r\n if query_id not in query_id_to_answers_map:\r\n query_id_to_answers_map[query_id] = []\r\n query_id_to_answers_map[query_id].append(normalized_answer)\r\n return query_id_to_answers_map, no_answer_query_ids",
"def get_all_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question, user_id FROM question\")\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=q[2])) for q in questions}\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id, answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id\")\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()",
"def test_get_answers_by_user(self):\n user = self.create_user()\n user_id = user[0] # answer author user id\n question_id = int(self.create_question()[0])\n auth_token = user[1]\n posted_answers = [\n {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n },\n {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n },\n {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n }]\n for i, elem, in enumerate(posted_answers):\n self.post_data(question_id, auth_token=auth_token, data=elem)\n path = \"/api/v2/answers/users/{}\".format(user_id)\n headers = {\"Authorization\":\"Bearer {}\".format(auth_token),\n \"Content-Type\":\"application/json\"}\n answers = self.client.get(path, headers=headers)\n self.assertEqual(answers.status_code, 200)\n self.assertEqual(len(answers.json[\"answers\"]), len(posted_answers))",
"def get_user_answers(user_id):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n answer_table = dynamodb.Table(\"Answers\")\n\n filterexpression = Attr(\"UserId\").eq(user_id)\n response = answer_table.scan(FilterExpression=filterexpression)\n answers = response.get(\"Items\")\n\n return answers",
"def questions(self, request, pk):\n tag = self.get_object()\n questions = tag.questions.all()\n serializer = QuestionSerializer(questions, many=True, context={'request': request})\n return Response(serializer.data)",
"def get_answer_texts(\n self, answer_token_idxs: Dict[QuestionId, Tuple[Any, ...]]\n ) -> Dict[QuestionId, str]:\n return self.corpus.get_answer_texts(answer_token_idxs)",
"def search_questions():\n try:\n request_data = request.get_json()\n questions = get_all_questions(\n query=request_data.get('searchTerm')\n )\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n })\n\n except Exception as exp:\n abort(exp.code)",
"def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return"
] |
[
"0.66400397",
"0.5719572",
"0.57046854",
"0.56917447",
"0.5655302",
"0.55065054",
"0.54775417",
"0.5461554",
"0.5448024",
"0.5443218",
"0.52777815",
"0.5240148",
"0.52308375",
"0.519296",
"0.51627445",
"0.51517123",
"0.51205117",
"0.5093833",
"0.5086946",
"0.50797665",
"0.507237",
"0.50528646",
"0.50243145",
"0.5020889",
"0.5011914",
"0.49734402",
"0.4972132",
"0.49448293",
"0.4925592",
"0.49131808"
] |
0.6150074
|
1
|
Run any available automatic updates
|
def _do_automatic_updates(self):
from .updates import Updates
for update_name in Updates.check_automatic_updates():
print("Applying automatic update: {}".format(update_name))
Updates.do_update(update_name)
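The method above only depends on two classmethods of ``Updates``: one that lists pending automatic updates and one that applies an update by name. A minimal sketch of that interface, with a purely hypothetical registry standing in for real update routines, might look like this:

class Updates:
    # Hypothetical registry: name -> (is_automatic, apply_function).
    _registry = {
        "2.0-schema-migration": (True, lambda: None),
        "3.0-manual-step": (False, lambda: None),
    }

    @classmethod
    def check_automatic_updates(cls):
        # Names of all updates flagged as automatic and therefore safe to run unattended.
        return [name for name, (auto, _) in cls._registry.items() if auto]

    @classmethod
    def do_update(cls, name):
        # Apply the stored update routine for ``name``.
        cls._registry[name][1]()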
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update( ):\r\n pass",
"def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)",
"def publish_updates():\n run_subprocess(['osg-batch-update'])",
"def _update(self, force=False):\n if self.autoupdate:\n self.update(force)",
"def update():",
"def update():",
"def process_updates():\n print \"[{x}] Processing Requests\".format(x=dates.now())\n WorkflowApi.process_requests()\n WorkflowApi.process_enhancements()",
"def cli():\n update_all_posts()\n push_updates()",
"def force_update():\n # TODO: IS THERE A WAY TO ONLY REFRESH FOR A GIVEN YEAR?\n # TODO: FIND A WAY TO DO THIS ASYNCHRONOUSLY\n print('Starting update...')\n # TODO: THIS IS A PRETTY BAD WORKAROUND. WE SHOULD FIND A WAY TO PROVIDE THE SCRIPTS WITH THE 'LANDTAGSWAHLDB' PACKAGE\n sql_path = pathlib.Path(current_app.instance_path).parent.parent / 'sql-scripts' / 'UpdateViews.sql'\n with open(sql_path) as sql_file:\n script = sql_file.read()\n db = db_context.get_db()\n db.run_script(script)\n db.commit()\n return 'Success'",
"def sync_entries():\n import time\n\n while True:\n try:\n update_pending_scripts(settings['api_handler'])\n except:\n logging.exception(\"Error occured during synchronisation\")\n time.sleep(60)",
"def run(self):\n self.update_repos()",
"def refresh(self):\n self.update_from_file()\n self.update_from_env()",
"def update_data(update_method):\n log.debug('Starting update')\n cmd = ['/usr/bin/python', wf.workflowfile('update.py')]\n if update_method == 'force':\n cmd.append('--update')\n cmd.append('force')\n\n # Update projects data\n log.debug('Run update command : {}'.format(cmd))\n run_in_background('update', cmd)\n\n return 0",
"def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()",
"def Automaticupdatesobjects():\n pass",
"def run(updater: Updater):\n logger = getLogger()\n logger.info(\"Starting polling\")\n updater.start_polling()",
"def update():\n\n # load the OPML file and update any feeds\n for o in oercloud.Session().query(oercloud.Feed).filter_by(\n feed_type=oercloud.feed.OPML):\n \n aggregator.LOG.info(\"Loading OPML from %s\" % o.url)\n update_feed_list(opml.parse(o.url))\n\n # check each feed and see if it should be polled\n check_feeds()",
"def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass",
"def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')",
"def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()",
"def cronjobs():\n cj.update_cronjob_db()",
"def run(self):\n\n run_command(['apt-get', 'update'])\n run_command(['apt-get', 'install', '-y', 'unattended-upgrades'])\n run_command(['apt-get', 'upgrade', '-y'])",
"def do_update(services):\n\n global running_update\n\n for service in services:\n feed = registry[service.name][0]\n try:\n if type(feed) is list:\n entries = []\n for f in feed:\n entries.extend(f(service))\n else:\n entries = feed(service)\n # TODO should be in a transaction\n for entry in entries:\n entry.save()\n service.updated = datetime.utcnow()\n service.save()\n except:\n logging.exception(msg='updater exception for service %s' %service.name, exception=True)\n running_update = False",
"def cli():\n\n # XXX load the option parser and parser the command line\n\n aggregator.LOG.debug(\"Beginning feed update process.\")\n update()",
"def update(*args):",
"def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()",
"def update(services=None):\n\n global running_update\n global running_update_lock\n\n to_update = []\n if services == None:\n services = Service.objects.all()\n\n for service in services:\n if registry.has_key(service.name):\n entry = registry[service.name][0]\n if entry != None and service.include_update:\n prev_update = service.updated\n if prev_update + timedelta(minutes=service.period) <= datetime.utcnow():\n to_update.append(service)\n else:\n logging.warning('updater for service %s not found' %service.name)\n\n running_update_lock.acquire()\n if not running_update and len(to_update) > 0:\n running_update = True \n running_update_lock.release()\n msg = 'Thanks! Wait a moment and refresh this page to see the latest updates.'\n if (settings.UPDATE_THREAD):\n UpdateThread(to_update).start()\n else:\n do_update(to_update)\n else:\n msg = 'Nothing to feed! All services are up-to-date. Thanks anyway!'\n\n return msg",
"def ScheduleUpdate(self):\r\n self.__fCachedFiles = None\r\n if self._timeout_id is not None:\r\n return\r\n app = wingapi.gApplication\r\n self._timeout_id = app.InstallTimeout(1000, self.__CB_DoUpdate)",
"def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()",
"def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)"
] |
[
"0.68387514",
"0.67803097",
"0.67694116",
"0.67483944",
"0.6739298",
"0.6739298",
"0.673246",
"0.6704835",
"0.6684409",
"0.66661066",
"0.66234773",
"0.6621876",
"0.65760237",
"0.6525605",
"0.65126944",
"0.6494933",
"0.645831",
"0.6454078",
"0.6441949",
"0.6441283",
"0.6387731",
"0.63628834",
"0.6356283",
"0.63507855",
"0.63505965",
"0.6348278",
"0.63041043",
"0.62875444",
"0.62853765",
"0.62416553"
] |
0.85161316
|
0
|
Get directory for output files. Uses environment variable ``BRIGHTWAY2_OUTPUT_DIR``; ``preferences['output_dir']``; or directory ``output`` in current project. Returns output directory path.
|
def output_dir(self):
ep, pp = (
maybe_path(os.getenv("BRIGHTWAY2_OUTPUT_DIR")),
maybe_path(config.p.get("output_dir")),
)
if ep and ep.is_dir():
return ep
elif pp and pp.is_dir():
return pp
else:
return self.request_directory("output")
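The property resolves the output location in a fixed fallback order: the ``BRIGHTWAY2_OUTPUT_DIR`` environment variable, then the stored ``output_dir`` preference, then an ``output`` directory inside the current project. A standalone sketch of the same chain, with the preferences mapping and the directory-request callback passed in explicitly (both parameter names are illustrative), could be:

import os
from pathlib import Path

def resolve_output_dir(preferences, request_directory):
    def maybe_path(value):
        # Wrap a possibly-missing string in a Path, or return None.
        return Path(value) if value else None

    env_dir = maybe_path(os.getenv("BRIGHTWAY2_OUTPUT_DIR"))
    pref_dir = maybe_path(preferences.get("output_dir"))
    if env_dir and env_dir.is_dir():
        return env_dir
    if pref_dir and pref_dir.is_dir():
        return pref_dir
    return request_directory("output")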
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_output_path():\n return os.getcwd() + \"/output/\"",
"def get_output_dir(self):\n return self.output_dir",
"def outputdir():\n return __OUTPUT_DIR__",
"def output_dir(self):\n return os.path.join(self._sandbox, 'output' + os.path.sep)",
"def get_output_dir(self):\n return self.OUTPUT_DIRECTORY",
"def get_output_directory(self):\n return self.__output_dir",
"def output_dir(self):\n return self.c.build_dir.join(self.c.build_config_fs)",
"def getOutputDir():\n directory = os.path.join(Configurations.getProjectRootDir(), OUTPUT_DIR_NAME)\n if not os.path.exists(directory):\n logger.warning('Directory %s not exist, CREATE!', directory)\n os.makedirs(directory)\n\n return directory",
"def output_dir():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n outpath = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n return outpath",
"def getTradeOutputDir():\n\tglobal config\n\treturn config['directory']['output']",
"def __get_output_dir(self, conf):\n return conf[self.conf_item.get_output_dir()]",
"def getOutputDirectory(self):\n return self._outputDir_",
"def output_directory(self):\n if self._output_directory is None:\n cache_filename = self._original_cache\n output_directory = settings.cache_folder / cache_filename\n output_directory.makedirs_p()\n self._output_directory = output_directory.expand()\n return Path(self._output_directory)",
"def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)",
"def get_output_dir(imdb, net):\n path = os.path.abspath(os.path.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if net is None:\n return path\n else:\n return os.path.join(path, net.name)",
"def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)",
"def get_output_folder(self):\n return os.path.join(self.root_output_folder, self.base_fish_folder)",
"def get_output_path(backup_file, output_root):\n dir_path = backup_file.translated_path()\n full_output_path = os.path.join(output_root, dir_path)\n return os.path.normpath(full_output_path)",
"def dir_from_output(output):\n log.debug(\"real output: %s\", output)\n if platform.system() == \"Darwin\":\n # [HACK] uh... I'm not sure why it happens like this...\n folder_to_search = path.join(output, '..', '..')\n log.debug(\"folder to search: %s\", folder_to_search)\n return folder_to_search\n elif platform.system() == \"Windows\":\n log.debug(\"architecture: %s\", platform.architecture())\n folder_to_search = path.join(output, '..')\n log.debug(\"folder to search: %s\", folder_to_search)\n return path.normpath(folder_to_search)\n elif platform.system() == \"Linux\":\n return path.normpath(path.dirname(output))\n return None",
"def _get_output_directory(self):\n self._set_output_directory(QFileDialog.getExistingDirectory(self._widget, \"Select output directory\"))",
"def output_dir(self):\n return os.path.join(self.checkpoint_dir, self.model_dir)",
"def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)",
"def _set_output_dir(self):\n return os.path.join(self.outputDir,\n datetime.datetime.utcnow().strftime(\"%Y%m%d\"))",
"def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path",
"def GetOutputPath(self):\n self.outputDir = raw_input(\"What path should be outputted to?\\n\\r>>> \")\n if self.outputDir is \"\":\n self.outputDir = \"C:\\Users\\Lucas\\Pictures\\GraphOutput\"\n bob = os.path.isabs(self.inputDir)\n if not bob:\n print \"that was not an excepted path name. Try again\"\n self.GetOutputPath()",
"def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path",
"def out_dir(self) -> str:\n return self._out_dir",
"def getOutputFile(fname):\n return os.path.join(Configurations.getOutputDir(), fname)",
"def OutputPath(self):\n return os.path.join(self._module.workspace, \n \"broc_out\", \n self._module.module_cvspath,\n \"output\")",
"def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path"
] |
[
"0.7468781",
"0.7314277",
"0.7295703",
"0.7294863",
"0.72655517",
"0.71530145",
"0.7125894",
"0.7114579",
"0.70545405",
"0.70489943",
"0.6987306",
"0.6768059",
"0.6752238",
"0.66888946",
"0.6680562",
"0.66408074",
"0.66290057",
"0.6622328",
"0.6595422",
"0.6581295",
"0.65160143",
"0.6512174",
"0.6466365",
"0.64392424",
"0.6402976",
"0.63823974",
"0.63756067",
"0.6374563",
"0.63400424",
"0.63206387"
] |
0.8114319
|
0
|
Copy current project to a new project named ``new_name``. If ``switch``, switch to new project.
|
def copy_project(self, new_name, switch=True):
if new_name in self:
raise ValueError("Project {} already exists".format(new_name))
fp = self._base_data_dir / safe_filename(new_name, full=self.dataset.full_hash)
if fp.exists():
raise ValueError("Project directory already exists")
project_data = ProjectDataset.get(ProjectDataset.name == self.current).data
ProjectDataset.create(
data=project_data, name=new_name, full_hash=self.dataset.full_hash
)
shutil.copytree(self.dir, fp)
create_dir(self._base_logs_dir / safe_filename(new_name))
if switch:
self.set_current(new_name)
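Assuming the manager is exposed as a module-level instance named ``projects`` (that name is an assumption here, not something stated above), a usage sketch of the copy-and-switch behaviour:

# Hypothetical project names; ``projects`` stands for the manager instance.
projects.copy_project("my analysis - scenario B")             # copies and switches
assert projects.current == "my analysis - scenario B"

projects.copy_project("my analysis - backup", switch=False)   # copies, stays on current project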
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def switch_project(self, project_name, check=True):\n with self.app.page_base.dropdown_menu_project as menu:\n\n if menu.label_project.value == project_name:\n self.app.current_project = project_name\n return\n\n menu.click()\n menu.item_project().click()\n self.app.current_project = project_name\n\n if check:\n self.close_notification('success')\n assert_that(menu.label_project.value, equal_to(project_name))",
"def _change_project(self):\n project_key = utils.prompt_string(\n 'You are currently managing Google Cloud Project {!r}.\\n'\n 'This project is currently saved as {!r}.\\n'\n 'All of the currently configured projects include: {}.\\n'\n 'Which project would you like to switch to?'.format(\n self._config.project, self._config.key,\n ', '.join(common.get_available_configs(self._config.path))))\n return _Manager.new(\n self._config.path, self._prefer_gcs, project_key=project_key,\n version=self._version)",
"def update_project_name(self, curr_proj, proj_new_name):\r\n for proj in self.__projects:\r\n if proj == curr_proj: # Find the project with the same current name\r\n proj.update_name(proj_new_name) # Update the project's name\r",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n SwitchWorkspaceCC(workspace_name, create_if_missing)",
"def NewProject (projectname):\n\tif projectname == \"\" or projectname == None:\n\t\tnewprojcode(projectname)\n\telse:\n\t\tnewprojCode_withNamed()",
"def newProject(self):\n dialog = NewProjectDialog()\n if not dialog.name is None and not dialog.path is None:\n self._app.createProject(str(dialog.name), str(dialog.path))",
"def update_project_name(self, project_name, new_project_name, check=True):\n page_projects = self._page_projects()\n\n with page_projects.table_projects.row(\n name=project_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_edit.click()\n\n with page_projects.form_edit_project as form:\n form.field_name.value = new_project_name\n form.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=new_project_name).wait_for_presence()",
"def update_project(self, name):\n self._log.info(\"Updating project: {}\".format(name))\n if name in self.projects:\n pass\n else:\n self.add_project(name)",
"def upgrade_project(ctx, path):\n with ctx.cd(path):\n ctx.run(\"newt upgrade\")",
"def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin [email protected]:{0}/{1}.git'.format(bitbucket_user, conta))",
"def clone_project(\n project_name,\n from_project,\n project_description=None,\n copy_annotation_classes=True,\n copy_settings=True,\n copy_workflow=True,\n copy_contributors=False\n):\n try:\n get_project_metadata_bare(project_name)\n except SANonExistingProjectNameException:\n pass\n else:\n raise SAExistingProjectNameException(\n 0, \"Project with name \" + project_name +\n \" already exists. Please use unique names for projects to use with SDK.\"\n )\n metadata = get_project_metadata(\n from_project, copy_annotation_classes, copy_settings, copy_workflow,\n copy_contributors\n )\n metadata[\"name\"] = project_name\n if project_description is not None:\n metadata[\"description\"] = project_description\n\n return create_project_from_metadata(metadata)",
"def createproject(project_name):\n app_clone_script = 'git clone https://github.com/jaarce/falcon-bp.git %s' % project_name\n subprocess.call(app_clone_script.split(' '))",
"def SwitchWorkspace(workspace_name, create_if_missing=True):\n if workspace_name == '':\n raise ValueError('The workspace name should not be empty.')\n _C.SwitchWorkspace(workspace_name, create_if_missing)",
"def qck_gen_proj(self, master):\r\n if not self._check_project_name():\r\n return\r\n\r\n # Clear out driver list and board\r\n self.newProj.board = ()\r\n self.newProj.drvList = []\r\n\r\n # Configure ksdkProj given GUI state\r\n self.localSDK.get_version()\r\n self.newProj.name = self.widgetList[4].get()\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n self.newProj.useBSP = not self.localSDK.isNewVersion()\r\n\r\n # Add the board\r\n try:\r\n userBoard = int(self.widgetList[6].curselection()[0]) + 1\r\n self.newProj.add_board(userBoard, self.localSDK.brdList)\r\n except IndexError:\r\n tkMessageBox.showinfo(\"No board selected!\",\\\r\n \"Make sure a board has been selected.\")\r\n return\r\n\r\n self.widgetList[10].step(30)\r\n self.widgetList[10].update_idletasks()\r\n\r\n # Quick check to see if this poject already exists\r\n checkPath = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1]) + '/' + self.newProj.name\r\n if os.path.isdir(checkPath):\r\n tkMessageBox.showinfo(\"Project exists\",\\\r\n \"A project by this name already exists.\")\r\n return\r\n\r\n # in quick mode there is always generated the board project\r\n self.newProj.isBoardProject = True\r\n \r\n # Add all drivers for this device\r\n self.localSDK.get_drivers()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n self.localSDK.drvList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_drv(self.localSDK.drvList)\r\n\r\n kT.debug_log('Port Count: ' + str(self.newProj.portCount))\r\n\r\n #Generate IAR project files\r\n #self.newProj.fast_build_IAR()\r\n self.newProj.workSpace = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1]) + '/'\r\n projectPath = self.newProj.workSpace + self.newProj.name\r\n\r\n #Get all include paths lists into one list\r\n includeList = []\r\n index = 0\r\n isPresent = False\r\n while index < len(self.newProj.drvList):\r\n count = 0\r\n while count < len(self.newProj.drvList[index][2]):\r\n isPresent = False\r\n newPath = str(\\\r\n self.newProj.drvList[index][2][count]\\\r\n )\r\n if len(includeList) > 0:\r\n listIndex = 0\r\n while listIndex < len(includeList):\r\n if newPath == includeList[int(listIndex) - 1]:\r\n isPresent = True\r\n listIndex += 1\r\n if not isPresent:\r\n includeList.append(newPath)\r\n count += 1\r\n index += 1\r\n\r\n self.newProj.libList.append('platform')\r\n if not os.path.isdir(projectPath):\r\n os.makedirs(projectPath)\r\n self.newProj.rtos = 'bm'\r\n\r\n if not os.path.isfile(projectPath + '/main.c'):\r\n self.newProj.make_main_file(projectPath, includeList)\r\n if not os.path.isfile(projectPath + '/hardware_init.c'):\r\n self.newProj.make_hw_file(projectPath)\r\n\r\n self.widgetList[10].step(30)\r\n self.widgetList[10].update_idletasks()\r\n\r\n ## Copy over BSP files\r\n if self.newProj.useBSP:\r\n if not os.path.isdir(projectPath + '/board'):\r\n os.mkdir(projectPath + '/board')\r\n bspDir = self.newProj.sdkPath + '/examples/' + self.newProj.board[1]\r\n bspList = kT.list_files(bspDir)\r\n for f in bspList:\r\n if f[-2:] == '.c':\r\n shutil.copyfile(bspDir + '/' + f, projectPath + '/board/' + f)\r\n if f[-2:] == '.h':\r\n shutil.copyfile(bspDir + '/' + f, projectPath + 
'/board/' + f)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.IARname, self.newProj.device):\r\n print self.newProj.isLinked\r\n if self.localSDK.isNewVersion():\r\n newIar = kIarNew.KsdkIarNew(self.newProj)\r\n else:\r\n newIar = kIar.KsdkIar(self.newProj)\r\n newIar.gen_ewp(self.newProj)\r\n newIar.gen_eww(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.KeilMDK, self.newProj.device):\r\n #Generate MDK project files\r\n if self.localSDK.isNewVersion():\r\n newMdk = kMdkNew.KsdkMdkNew(self.newProj)\r\n else:\r\n newMdk = kMdk.KsdkMdk(self.newProj)\r\n newMdk.gen_proj(self.newProj)\r\n newMdk.gen_wkspace(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.KinetisDesignStudio, self.newProj.device):\r\n #Generate KDS project fiels\r\n print self.newProj.isLinked\r\n if self.localSDK.isNewVersion():\r\n newKds = kKdsNew.KsdkKdsNew(self.newProj)\r\n else:\r\n newKds = kKds.KsdkKds(self.newProj)\r\n\r\n newKds.gen_cproject(self.newProj)\r\n newKds.gen_project(self.newProj)\r\n newKds.gen_working_set(self.newProj)\r\n newKds.gen_debug(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.AtollicStudio, self.newProj.device):\r\n #Generate ATL project files\r\n if self.localSDK.isNewVersion():\r\n newAtl = kAtlNew.KsdkAtlNew(self.newProj)\r\n else:\r\n newAtl = kAtl.KsdkAtl(self.newProj)\r\n newAtl.gen_cproject(self.newProj)\r\n newAtl.gen_project(self.newProj)\r\n newAtl.gen_debug(self.newProj)\r\n newAtl.gen_settings(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.ARMgcc):\r\n #Generate GCC project files\r\n if not self.newProj.fast_build_GCC():\r\n tkMessageBox.showinfo(\"Missing CMake Files\",\\\r\n \"CMake files are missing from your KSDK installation.\")\r\n\r\n #Text for window\r\n genString = 'Your project was created in the following location:\\n'\r\n pathString = ''\r\n pathString += self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath( self.newProj.board[1]) + '/' + self.newProj.name + '/'\r\n genString += pathString\r\n genString += '\\nPress the button below to open project location folder.'\r\n\r\n #Create window to show USER that project has been generated and where it is.\r\n popGen = Toplevel()\r\n if self.newProj.osType == 'Windows':\r\n winH = 100 * WIN_SCALE\r\n winW = 600 * WIN_SCALE\r\n elif self.newProj.osType == 'Darwin':\r\n if platform.mac_ver()[0][:5] == '10.10':\r\n winH = 100\r\n winW = 600\r\n elif platform.mac_ver()[0][:5] == '10.11':\r\n winH = 100\r\n winW = 660\r\n else:\r\n winH = 100\r\n winW = 600\r\n popGen.config(height=winH, width=winW)\r\n popGen.grid()\r\n if self.newProj.osType == 'Linux':\r\n img = Image(\"photo\", data=kImg.boardImages['kds_icon.gif']) # Use the .gif in Linux\r\n popGen.tk.call('wm', 'iconphoto', popGen._w, img)\r\n popGen.title(\"Project created\")\r\n popGen.geometry('%dx%d+%d+%d' % (winW, winH, master.winfo_x() + 20, master.winfo_y() + 20))\r\n popGen.resizable(width=FALSE, height=FALSE)\r\n popGen.configure(background='#E7E7E7')\r\n\r\n genTxt = Label(popGen, text=genString, justify=LEFT)\r\n genTxt.grid(row=0, column=0, columnspan=2, padx=5, pady=5)\r\n\r\n #Create button to open project folder\r\n ## IF we are in windows, we need to replace all '/' with '\\\\'\r\n tempString = pathString[:]\r\n if self.newProj.osType == 'Windows':\r\n pathString = ''\r\n pathString = kT.string_replace(tempString, '/', '\\\\')\r\n\r\n genButton = 
Button(popGen, text='Open Project Folder', command=lambda: self.view_project(pathString, popGen))\r\n genButton.grid(row=2, column=0, sticky=W, padx=5, pady=5)\r\n\r\n self.widgetList[10].step(35)\r\n self.widgetList[10].update_idletasks()\r\n\r\n # patch to implement automation test\r\n self.pop_gen = popGen\r\n\r\n return",
"def project_clone(request, proj_id=None):\n\n if not proj_id or not request.user.is_authenticated():\n raise Http404\n\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n project.pk = None\n project.user = request.user\n project.save()\n\n for scenario in Scenario.objects \\\n .filter(project_id=proj_id) \\\n .order_by('created_at'):\n scenario.pk = None\n scenario.project = project\n scenario.save()\n\n return redirect('/project/{0}'.format(project.id))",
"def set_project(\n name\n):\n if not is_alive():\n err_msg = \"Cannot connect to getML engine. Make sure the engine is running and you are logged in.\"\n raise ConnectionRefusedError(err_msg)\n\n cmd = dict()\n cmd[\"type_\"] = \"set_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)",
"def copy(self, new_name, new_config, retain_spot_price=False, delete_old=False):\n config = self.get()\n config.update(new_config)\n config[\"LaunchConfigurationName\"] = new_name\n\n # The following fields are not allowed as launch config input.\n config.pop(\"LaunchConfigurationARN\")\n config.pop(\"CreatedTime\")\n config.pop(\"KernelId\")\n config.pop(\"RamdiskId\")\n if not retain_spot_price and config.get(\"SpotPrice\", None) != None:\n logger.info(\"Not retaining spot price!\")\n config.pop(\"SpotPrice\")\n\n new_lc = LaunchConfig(new_name, aws_profile=self._aws_profile)\n new_lc.create(config)\n if delete_old:\n self.delete()\n return new_lc",
"def oldnewProject( projectName, obsblockName, subObsblockName='', \n subarray=DEFAULT,isDualCorr=False) :\n if(obsblockName == \"none\" or obsblockName == \"NONE\"\n or obsblockName == \"None\") :\n m = \" is a protected name and cannot be used as an obsblock name\"\n raise Exception, obsblockName + m\n rtdComment(\"Not using Project Database\", subarray)\n trialNo = myIncrementTrial( projectName, obsblockName, subObsblockName, False,isDualCorr)\n rtdComment( \"Trial is \" + str( trialNo ), subarray)\n multiSubarray('setObsblock', subarray,\n projectName,obsblockName,subObsblockName,trialNo)\n obsblockID = projectName + '.' + obsblockName + '.' + str(trialNo)\n #s.project( projectName )\n #s.obsblock( obsblockName )\n #s.subObsblock( subObsblockName )\n #s.trial( trialNo )\n multiSubarray('setScriptInt', subarray, odi.INDX_INT_OBSTRIAL, trialNo )\n constraints(subarray=subarray)\n return obsblockID",
"def view_project(self, pathString, window):\r\n\r\n kT.debug_log('This is the project path: ' + pathString)\r\n\r\n if self.newProj.osType == 'Windows':\r\n subprocess.call(['explorer', pathString])\r\n elif self.newProj.osType == 'Linux':\r\n subprocess.Popen(['xdg-open', pathString])\r\n elif self.newProj.osType == 'Darwin':\r\n subprocess.Popen(['open', pathString])\r\n\r\n window.destroy()\r\n\r\n return",
"def update_name(self, project: str, new_name: str) -> dict:\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {\n 'url': project\n },\n {\n '$set': {\n 'name': new_name,\n }\n }\n )",
"def rename_project_file(self, old_project=None, new_project=None):\n old_is_project = type(old_project) is Project\n new_is_project = type(new_project) is Project\n\n # cancel if arguments are not projects\n if not old_is_project or not new_is_project:\n return False\n\n # generate filenames\n path = self.data_path + self.project_dir\n filename = path + '/' + self.us(old_project.project_id()) + '.flproject'\n filename_bu = path + '/' + self.us(old_project.project_id()) + '.flproject_bu'\n filename_new = path + '/' + self.us(new_project.project_id()) + '.flproject'\n filename_new_bu = path + '/' + self.us(new_project.project_id()) + '.flproject_bu'\n\n # check if the files exist and rename them\n if os.path.isfile(filename):\n os.rename(filename, filename_new)\n\n if os.path.isfile(filename_bu):\n os.rename(filename_bu, filename_new_bu)\n\n return True",
"def set_projects(self, name_short, name, disc_path):\n if name not in conf.projects:\n pass # TODO add the project in the conf\n else:\n return \"Project already exist\"\n return self.datas.create_path(disc_path)",
"def do_project(self, arg):\n def _usage():\n self.do_help('project')\n args = shlex.split(arg)\n if not args:\n _usage()\n return\n commands = ['create', 'delete', 'update']\n first_arg = args[0].lower()\n is_project_info = first_arg not in commands\n if is_project_info:\n # Get the project info\n project_name = args[0].decode('utf8')\n self.display_project_info(project_name)\n return\n if first_arg == 'create':\n # Create a new project\n self.create_project()\n return\n if len(args) == 1:\n print(self.error_wrong_parameters)\n _usage()\n return\n project_name = args[1].decode('utf8')\n if first_arg == 'update':\n # Update a project\n self.update_project(project_name)\n elif first_arg == 'delete':\n # Delete a project\n self.delete_project(project_name)\n return",
"def _on_new_project(self):\n lang = self.ddnGuiLanguage.get()\n projectfile = filedialog.asksaveasfilename(\\\n filetypes=[('Paratext Biblical Terms', '.htm'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['BibTerms2Dict project'], \\\n defaultextension='.prj')\n if os.path.exists(projectfile):\n messagebox.showwarning(LOCALIZED_TEXT[lang]['New Project'], \\\n LOCALIZED_TEXT[lang]['{} already exist choose another name.'].\\\n format(os.path.basename(projectfile)))\n return\n else:\n newfile = codecs.open(fileout, mode='w', encoding='utf-8')\n newfile.close()\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n self.ddnCurProject.set(os.path.basename(projectfile)[:-4])\n self.update\n\n pass",
"def nd_ok_clicked(self, widget, data=None):\n filename = self.new_chooser.get_filename()\n self.communicator.new_project(filename)",
"def new_project(file_path):\n project_template_dir = pkg_resources.resource_filename('camtasia', os.path.join('resources', 'new.cmproj'))\n shutil.copytree(project_template_dir, file_path)",
"def newprojcode(name):\n\tprint \"\\n======Creando Nuevo Proyecto======\\n\"\n\tproject_name = name\n\n\tif project_name == \"\" or project_name == None:\n\t\tcancel()\n\n\tprint \"*Nombre del Proyecto: \", project_name\n\n\tproject_languges = raw_input(\"*Lenguaje: \")\n\tpname = project_name\n\n\tprint \"\\n==================================\\n\"\n\n\tdirectory = str(\"Project_\" + pname + \"/\")\n\n\tif os.path.exists(\"Project\"):\n\t\t#Nos ubicamos en el directorio raiz del Proyecto\n\t\tsubprocess.call([\"mkdir\", directory], shell=True)\n\t\tprint \"Creando el Directorio Raiz...\"\n\telse:\n\t\tos.mkdir(\"Project\")\n\t\tos.chdir(\"Project/\")\n\t\tsubprocess.call([\"mkdir\", directory])\n\t\tif not os.path.exists(directory):\n\t\t\tprint \"LA CARPETA {} NO EXISTE!\".format(directory)\n\t\t\tcancel()\n\t\telse:\n\t\t\tos.chdir(directory)\n\n\tdirs = \"Project\" + pname + \"/\"\n\t#Nos ubicamos en el directorio raiz del Proyecto\n\tos.chdir(dirs)\n\tprint \"Accediendo al Directorio\", dirs + \"...\"\n\tprint \"Creando el Directorio de Iconos...\"\n\tsubprocess.call(\"mkdir Iconos\", shell=True)\t\t#directorio iconos *\n\tprint \"Creando el Directorio de Debug...\"\n\tsubprocess.call(\"mkdir Debug\", shell=True)\t\t#directorio debug *\n\tprint \"Crenado el Directoiro de Scripts...\"\n\tsubprocess.call(\"mkdir Scripts\", shell=True)\t#directorio scripts *\n\tprint \"Creando los Archivos XML del Proyecto...\\n\"\n\tsubprocess.call(\"source XMLProjectFiles.sh\", shell=True)\n\tprint \"Se ha Creado el Proyecto\", pname, \" con Exito!!\"\n\n\t#Se crea el codigo de verificacion del proyecto\n\tfor i in range(0, 15):\n\t\tx = random.randint(1, 1000000)\t#Calcula numeros aleatorios de 1 a 1,000,000(1 millon)\n\t\tVerifiCode = x\t\t\t\t\t#VerifiCode deja el valor de 0 y toma el valor de x\n\t\tCodeValue = bin(VerifiCode)\t\t#Encripta el codigo a binario\n\n\tprint \"Su codigo de proyecto es:\", CodeValue + \"\\n\"\n\tSaveKey(CodeValue)\n\tprint \"Realizando copias de archivos prioritarios a los servidores...\"\n\tpcommands.ServerCopy()\n\tprint \"Copias realizadas con exito!!\"",
"def copy_project_template(template_name, project_name):\n\tprojects_home = settings.PROJECTS_HOME\n\n\tif not os.path.isdir(projects_home):\n\t\t# let create it first\n\t\tos.mkdir(projects_home)\n\n\tproject_tpl = os.path.join(\n\t\tsettings.TEMPLATES_HOME,\n\t\t'project_template_{}'.format(template_name)\n\t)\n\n\toptions = {\n\t\t'verbosity': 0,\n\t\t'extensions': ['py'],\n\t\t'files': [],\n\t\t'secret_key': get_random_secret_key(),\n\t\t'template': project_tpl,\n\t}\n\n\tcmd = TemplateCommand()\n\n\tcmd.validate_name(project_name, \"project\")\n\n\tcmd.handle('project', project_name, projects_home, **options)",
"def initialize_new_project(self, flag_new=True):\n logging.debug(\"initialize new project...\")\n\n # self.lbLogoUnito.setVisible(not flag_new)\n self.lbLogoeMOC.setVisible(not flag_new)\n self.dwEthogram.setVisible(flag_new)\n self.dwSubjects.setVisible(flag_new)",
"def rename(name: str, new_name: str):\n profiles = prefect.settings.load_profiles()\n if name not in profiles:\n exit_with_error(f\"Profile {name!r} not found.\")\n\n if new_name in profiles:\n exit_with_error(f\"Profile {new_name!r} already exists.\")\n\n profiles.add_profile(profiles[name].copy(update={\"name\": new_name}))\n profiles.remove_profile(name)\n\n # If the active profile was renamed switch the active profile to the new name.\n prefect.context.get_settings_context().profile\n if profiles.active_name == name:\n profiles.set_active(new_name)\n if os.environ.get(\"PREFECT_PROFILE\") == name:\n app.console.print(\n f\"You have set your current profile to {name!r} with the \"\n \"PREFECT_PROFILE environment variable. You must update this variable to \"\n f\"{new_name!r} to continue using the profile.\"\n )\n\n prefect.settings.save_profiles(profiles)\n exit_with_success(f\"Renamed profile {name!r} to {new_name!r}.\")"
] |
[
"0.6570881",
"0.6161404",
"0.59923553",
"0.58610016",
"0.5829468",
"0.58159685",
"0.5797276",
"0.55726177",
"0.5498399",
"0.54869246",
"0.5472639",
"0.5428517",
"0.5388308",
"0.534181",
"0.53339595",
"0.53120494",
"0.5292763",
"0.5220154",
"0.5190714",
"0.5187932",
"0.5186884",
"0.5182432",
"0.51692426",
"0.5161192",
"0.51456255",
"0.5141263",
"0.51400936",
"0.5129783",
"0.511841",
"0.5117947"
] |
0.7932269
|
0
|
Point the ProjectManager towards a temporary directory instead of `user_data_dir`. Used exclusively for tests.
|
def _use_temp_directory(self):
if not self._is_temp_dir:
self._orig_base_data_dir = self._base_data_dir
self._orig_base_logs_dir = self._base_logs_dir
temp_dir = Path(tempfile.mkdtemp())
self._base_data_dir = temp_dir / "data"
self._base_logs_dir = temp_dir / "logs"
self.db.change_path(":memory:")
self.set_current("default", update=False)
self._is_temp_dir = True
return temp_dir
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _create_user_data_dir(self):\n self._user_data_dir = tempfile.TemporaryDirectory(prefix=f'{self.USER_DATA_DIR_PREFIX}_tmp_')\n return self._user_data_dir.name",
"def _temp_dir(self):\n tmp_dir = os.path.join(self.output_dir, self.config.find_tune[\"run_dir\"])\n try:\n os.makedirs(tmp_dir)\n except OSError:\n pass\n os.chdir(tmp_dir)\n self.tmp_dir = \"./\"",
"def getSystemTmpDir():\n return os.path.join(Configurations.getProjectRootDir(), TEMP_DIR_NAME)",
"def get_new_temp_dir(self):\n return self.useFixture(fixtures.TempDir())",
"def fixture_project_dir(tmpdir_factory) -> Path:\n my_tmpdir = Path(tmpdir_factory.mktemp(\"data\"))\n yield my_tmpdir\n shutil.rmtree(str(my_tmpdir))",
"def gradio_temp_dir(monkeypatch, tmp_path):\n monkeypatch.setenv(\"GRADIO_TEMP_DIR\", str(tmp_path))\n return tmp_path",
"def setTmpDir(self):\n\t\tif os.name != 'nt':\n\t\t\t# On unix use /tmp by default\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"/tmp\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\telse:\n\t\t\t# On Windows use the current directory\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\t\tself.tmpDir = os.environ.get(\"TEMP\", self.tmpDir)\n\t\tif not os.path.isdir(self.tmpDir):\n\t\t\tself.tmpDir = \"\"\n\t\telif not os.access(self.tmpDir, os.F_OK + os.W_OK):\n\t\t\tself.tmpDir = \"\"",
"def get_temp_dir():\n return tempfile.mkdtemp()",
"def temp_dir():\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n return _temp_dir",
"def tmp_data_directory(tmp_path_factory):\n return str(tmp_path_factory.mktemp(\"datathon-mlapp-starter\"))",
"def tempdir(self):\n path = tempfile.gettempdir()\n return os.path.join(path, 'parquet-index-test-' + str(uuid.uuid4()))",
"def chdir_tmp(self):\n dirname = make_tempdir()\n os.chdir(dirname)\n\n return dirname",
"def chdir_tmp(self):\n dirname = make_tempdir()\n os.chdir(dirname)\n\n return dirname",
"def secure_temp_dir(context):\n tmpd = tempfile.TemporaryDirectory()\n context.tempdir = tmpd",
"def tempdir():\n\n # Create a directory and return the path\n return tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()",
"def setUp(self):\n tempDir.safe_mkdir(parents=True)\n os.chdir(tempDir.as_posix())",
"def getTmpdir(self):\n pass",
"def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()",
"def move_from_temp_directory(self):",
"def get_temp_dir():\n return settings.FILE_STORE_TEMP_DIR",
"def create_base_temp_dir(cls):\n if cls._thread_local.state.temp_dirs:\n base_temp_dir = os.path.join(cls._thread_local.state.temp_dirs[-1],\n cls._TEMP_SUBDIR)\n else:\n raise ValueError(\n 'A tf.Transform function that required a temp dir was called but no '\n 'temp dir was set. To set a temp dir use the impl.Context context '\n 'manager.')\n tf.gfile.MakeDirs(base_temp_dir)\n return base_temp_dir",
"def setUp(self):\n self.tmp = TemporaryDirectory()",
"def setUp(self):\n self.tmpdir = mkdtemp()"
] |
[
"0.73173493",
"0.72052014",
"0.7195199",
"0.70279366",
"0.6918659",
"0.67983097",
"0.67747945",
"0.67627513",
"0.6749146",
"0.6715924",
"0.67100096",
"0.6691613",
"0.6691613",
"0.6640233",
"0.6619128",
"0.6543333",
"0.6543333",
"0.6543333",
"0.6543333",
"0.6543333",
"0.6543333",
"0.6543333",
"0.6533469",
"0.65049803",
"0.6471697",
"0.64503723",
"0.64410496",
"0.6430634",
"0.64291966",
"0.63945"
] |
0.72850245
|
1
|
Point the ProjectManager back to original directories. Used exclusively in tests.
|
def _restore_orig_directory(self):
if not self._is_temp_dir:
return
self._base_data_dir = self._orig_base_data_dir
del self._orig_base_data_dir
self._base_logs_dir = self._orig_base_logs_dir
del self._orig_base_logs_dir
self.db.change_path(self._base_data_dir / "projects.db")
self.set_current("default", update=False)
self._is_temp_dir = False
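Paired with ``_use_temp_directory`` above, this gives tests an isolate-then-restore pattern. A pytest-style sketch (the fixture name is made up, and ``projects`` is assumed to be the module-level manager instance):

import pytest

@pytest.fixture
def temp_projects_dir():
    tmp = projects._use_temp_directory()   # redirect data and logs to a throwaway root
    yield tmp
    projects._restore_orig_directory()     # point back at the real directories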
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset(self):\r\n self._root_dir = None",
"def resetWorkingDirectory( self ):\n self.cwd = self.path",
"def tearDown(self):\n # unittest.TestCase.tearDown(self)\n\n root = os.path.join(\".\", \"files\")\n endingList = os.listdir(root)\n rmList = [fn for fn in endingList if fn not in self.startingList]\n\n if self.oldRoot == root:\n for fn in rmList:\n fnFullPath = os.path.join(root, fn)\n if os.path.isdir(fnFullPath):\n os.rmdir(fnFullPath)\n else:\n os.remove(fnFullPath)\n\n os.chdir(self.oldRoot)",
"def movedir(self):\n pass",
"def reset_cache_dir(self):\n self.manager.reset_cache_dir()",
"def reset_project(ctx, path):\n with ctx.cd(path):\n ctx.run(\"rm -rf project.state repos\")\n ctx.run(\"newt -v upgrade\")",
"def change_dir_without_context_manager(filename1, filename2):",
"def move_from_temp_directory(self):",
"def test_replace_project(self):\n pass",
"def teardown_method(self, method):\n os.chdir(self.old_pwd)\n shutil.rmtree(self.test_workspace, True)",
"def reset_download_dir(self):\n self.manager.reset_download_dir()",
"def reset():\n local('cd {{ project_name }} && \\\n rm -rf static && rm -rf gzip && rm -rf build')",
"def reset_backup_folder(self):\n pass",
"def tearDown(self):\n os.rmdir(self.cur_source)\n super().tearDown()",
"def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)",
"def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)",
"def setUp(self):\r\n # this lets us delete the workspace after its done no matter the\r\n # the rest result\r\n self.workspace_dir = tempfile.mkdtemp()",
"def reset(self):\n self.reset_cache_dir()\n self.reset_download_dir()",
"def cd_up(self):\n parts = self.cwd.split(\"\\\\\")\n self.cwd = \"\"\n for i in parts[:-1]:\n self.cwd += i + \"\\\\\"\n self.cwd = self.cwd[:-1]",
"def previous_directory(self):\r\n prev_dir = Path(self.path_viewer.text()).parent\r\n self.set_new_path(str(prev_dir))",
"def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)",
"def reset():\n\n local(\"rm -rf ./build\")\n local(\"mkdir ./build\")",
"def reset(self):\n q.system.fs.removeDirTree(self.metadataPath)\n self.__init__(self.metadataPath,self.root)",
"def reset_cache_dir(self):\n self._set_cache_dir(self._get_default_cache_dir())",
"def reset(self):\n def remove_auxiliary_dir():\n egg_info_dir = self.project_name_sc + \".egg-info\"\n remove_directories([\n egg_info_dir,\n \".env\",\n \".eggs\",\n \".pytest_cache\",\n \"build\",\n \"dist\",\n \".cache\",\n \".benchmark\",\n \".tox\",\n \".vagrant\",\n \".tox\"])\n remove_files([\n \".coverage\",\n \".doit.db\",\n \".doit.bak\",\n \".doit.dat\",\n \".doit.dir\",\n ])\n\n # TODO(lschneider): Remove unnecessary files without command lines.\n # This code could be run directly from this function. However\n # the pathlib library is not part of the standard python 2.\n prefix = \"python -c \\\"import pathlib; \"\n delete_pyfiles = prefix + \"import pathlib; [p.unlink() for p in pathlib.Path('.').rglob('*.py[co]')]\\\"\"\n delete_dirs = prefix + \"import pathlib; [p.rmdir() for p in pathlib.Path('.').rglob('__pycache__')]\\\"\"\n\n return {\n \"actions\": [\n delete_pyfiles,\n delete_dirs,\n remove_auxiliary_dir,\n ],\n \"verbosity\": 2\n }",
"def restore_cwd():\n cwd = os.getcwd()\n try:\n yield\n finally:\n os.chdir(cwd)",
"def tearDown(self):\r\n shutil.rmtree(self.workspace_dir)",
"def cleanup(self):\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n \r\n path = os.path.join(session.home, project.name)\r\n project.work_area(False, True, True, path=path)\r\n \r\n if (result != None):\r\n _logger.info(\"Project found: '%s'\" % result)\r\n role = session.role\r\n co_role = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n session.role = co_role\r\n try:\r\n delResult = result.delete(scope='project_and_subproject_hierarchy')\r\n finally:\r\n session.role = role\r\n ccm.log_result(delResult, ccm.CHECKOUT_LOG_RULES, _logger)",
"def ChangeDir(self, path: str) -> None:\n ...",
"def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()"
] |
[
"0.67874736",
"0.6360402",
"0.6320904",
"0.6313339",
"0.6269328",
"0.6205224",
"0.6093901",
"0.6037744",
"0.60184413",
"0.6004511",
"0.5974991",
"0.59655535",
"0.5955894",
"0.5949952",
"0.5928603",
"0.5928603",
"0.58895755",
"0.5884733",
"0.5855362",
"0.584434",
"0.58135843",
"0.5804033",
"0.5801782",
"0.57968503",
"0.57946044",
"0.57930875",
"0.57875365",
"0.5786787",
"0.5765251",
"0.5739572"
] |
0.75356644
|
0
|
Delete project ``name``, or the current project. ``name`` is the project to delete. If ``name`` is not provided, delete the current project. By default, the underlying project directory is not deleted; only the project name is removed from the list of active projects. If ``delete_dir`` is ``True``, then also delete the project directory. If deleting the current project, this function sets the current directory to ``default`` if it exists, or to a random project. Returns the current project.
|
def delete_project(self, name=None, delete_dir=False):
victim = name or self.current
if victim not in self:
raise ValueError("{} is not a project".format(victim))
if len(self) == 1:
raise ValueError("Can't delete only remaining project")
ProjectDataset.delete().where(ProjectDataset.name == victim).execute()
if delete_dir:
dir_path = self._base_data_dir / safe_filename(victim)
assert dir_path.is_dir(), "Can't find project directory"
shutil.rmtree(dir_path)
if name is None or name == self.current:
if "default" in self:
self.set_current("default")
else:
self.set_current(next(iter(self)).name)
return self.current
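A usage sketch with hypothetical project names, again assuming a manager instance called ``projects``:

projects.delete_project("scratch work")                   # name removed, directory kept on disk
projects.delete_project("old import", delete_dir=True)    # project directory removed as well

# Deleting the current project switches to "default" if it exists, else a random project.
projects.set_current("temporary")
remaining = projects.delete_project()
print(remaining)   # e.g. "default"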
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)",
"def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None",
"def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()",
"def delete_namespaced_project(self, name, **kwargs):\n\n all_params = ['name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()",
"def delete_project(self, project_name, check=True):\n page_projects = self._page_projects()\n\n with page_projects.table_projects.row(\n name=project_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_delete.click()\n\n page_projects.form_delete_project_confirm.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_absence()",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete_project_by_name(self, project_name):\n with self._transaction.cursor() as cur:\n # delete associations between this project and any barcodes\n cur.execute(\"DELETE FROM barcodes.project_barcode \"\n \"WHERE project_id in (\"\n \"SELECT project_id FROM barcodes.project \"\n \"WHERE project = %s)\",\n (project_name,))\n\n # now delete the project itself\n cur.execute(\"DELETE FROM barcodes.project WHERE project = %s\",\n (project_name,))\n return cur.rowcount == 1",
"def clean_project(self, app_name=None, delete_all=False):\n\n if not app_name and not delete_all:\n ConuException(\"You need to specify either app_name or set delete_all=True\")\n\n if delete_all:\n args = [\"--all\"]\n logger.info('Deleting all objects in current project')\n else:\n args = \"-l app=%s\" % app_name\n logger.info('Deleting all objects with label app=%s', app_name)\n\n try:\n o = run_cmd(self._oc_command([\"delete\", \"all\", args]),\n return_output=True)\n o_lines = o.split('\\n')\n for line in o_lines:\n logger.info(line)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cleanup failed because of exception: %s\" % ex)",
"def delete_project(arn=None):\n pass",
"def delete(self, name, project=None):\n qlist = self._list(project)\n key = self._queue(project, name)\n self._db.delete(key)\n self._db.zremrangebyscore(qlist, -1, 1)",
"def delete_project(self, project_id):\n self._run(\n url_path=\"projects/delete\",\n id=project_id,\n )\n return True",
"def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name",
"def delete_project(self, project_id):\n return self._delete('/projects/{0}'.format(project_id))",
"def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))",
"def delete_project(self, project_id):\n _url = f\"{self.base_url}/projects/{project_id}\"\n self.http_call(\"delete\", _url)\n return",
"def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()",
"def deleteProject(self, projectId):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.delete(uri)\n return response",
"def delete(self, *, name: types.TSeedName) -> None:\n if not (self._base_path / self._get_file_name(name)).exists():\n raise exceptions.SeedNotFoundError(f\"could not find seed {name}\")\n (self._base_path / self._get_file_name(name)).unlink()",
"def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True",
"def delete_folder(self, name):\n return self.DeleteFolder(name, 0)",
"def get_project(self, name=None):\n if not name:\n if not self.select_project:\n log.error(\"no default project name specified\")\n return\n name = self.select_project\n\n if name in self.projects:\n return self.projects[name]\n\n log.debug( \"project {} not found in {} projects \".format(name, len(self.projects)) )\n return None",
"def DelProject(projname):\n\tif projname == \"\" or projname == None:\n\t\tpjnm = raw_input(\"\\nNombre del proyecto: \").lower()\n\t\tif pjnm == \"\" or pjnm == None:\n\t\t\tcancel()\n\telse:\n\t\t# Proceso para borrar todo el proyecto\n\t\tpass\n\n\tpa = open(\"author_name.txt\", \"r\")\t#Abre el archivo con el nombre del autor\n\tpa.read()\n\tpc = open(\"project_code.txt\", \"r\")\t#Abre el archivo con el codigo de proyecto\n\tpc.read()\n\n\tuserpa = raw_input(\"Ingrese el nombre del autor: \").lower()\n\tuserpc = raw_input(\"Ingrese el codigo del proyecto: \").lower()\n\n\tif userpa == pa and userpc == pc:\t#Se verifica que userpa(nombre del autor por el usuario) sea igual a pa(nombre original del autor) y lo mismo con el codigo del proyecto\n\t\tprint \"Iniciando el Borrado del Proyecto...\"\n\t\tpcommands.del_project()\n\t\tprint \"El proyecto se ha borrado con exito!\"\n\telse:\n\t\tprint \"El codigo del proyecto o el nombre del autor no es correcto.\"\n\t\tcancel()",
"def delete_dir(name):\n root_dir = get_data_dir()\n target_dir = root_dir / name\n if not is_relative_to(target_dir, root_dir) or target_dir == root_dir:\n return False\n try:\n shutil.rmtree(target_dir)\n return True\n except FileNotFoundError:\n return False",
"async def delete(self, ctx, project_name: str) -> None:\n if not ctx.projects.find_project(project_name):\n channel = discord.utils.get(\n ctx.guild.channels, name=f\"{project_name}-project\")\n\n if channel and channel.category.name == \"Flux Projects\":\n if ctx.author.permissions_in(channel).manage_channels:\n message = await ctx.send(\"That project doesn't appear to\"\n \" exist in my database, but the \"\n \"channel still exists. \"\n \"Would you like to delete it?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\",\n check=lambda reaction, user: (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n await channel.delete(reason=\"Project not found.\")\n await ctx.send(\"The channel was deleted sucessfully.\")\n return\n\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the channel.\")\n return\n\n else: # If author doesn't have access to deleting channels.\n await ctx.send(\"That project does not appear to be in my \"\n \"database, but the channel for it still \"\n \"exists. Please have someone with\"\n \" manage channels run this chommand.\"\n )\n return\n else:\n await ctx.send(\"I could not find this project.\")\n return\n\n if str(ctx.author.id) != ctx.projects.find_project(project_name).get(\n \"owner\"):\n await ctx.send(\"Only the project owner \"\n \"can delete this project.\")\n return\n message = await ctx.send(\"This action __cannot__ be undone. \"\n \"Once you do this, everything is gone. \"\n \"Are you sure you want to continue?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\", check=lambda reaction, user:\n (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n channel = ctx.projects.find_project(\n project_name).get(\"channel\")\n channel = discord.utils.get(ctx.guild.channels,\n id=int(channel))\n ctx.projects.delete_project(project_name)\n if channel:\n await channel.delete(reason=\"Project deleted.\")\n await ctx.send(\"The project has been deleted.\")\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the project.\")",
"def delete_proj_user_by_name(self, name):\n conn = pyone.OneServer(\n self.auth_url,\n session=\"{0}:{1}\".format(self.username, self.password)\n )\n userpool = conn.userpool.info(-1,-1,-1)\n for user in userpool.USER:\n if user.get_NAME() == name:\n group = user.get_GROUPS()[0]\n # delete group\n conn.group.delete(group)\n # delete user\n return conn.user.delete(user.get_ID())\n logger.warning(\"Delete user ONE: user does not exist: \", name)",
"def delete(\n self, url: str\n ) -> pymongo.results.DeleteResult:\n return self._mongo.delete({\n 'url': url\n },\n 'projects'\n )"
] |
[
"0.6074884",
"0.5668781",
"0.5642265",
"0.538783",
"0.5384421",
"0.5348353",
"0.5290247",
"0.52060354",
"0.5159435",
"0.5111131",
"0.5070248",
"0.4955428",
"0.49289957",
"0.4893168",
"0.4855105",
"0.4854254",
"0.4787456",
"0.47696927",
"0.47509116",
"0.4750903",
"0.47246382",
"0.47197455",
"0.46847486",
"0.46806452",
"0.46778855",
"0.46740106",
"0.46736452",
"0.4613256",
"0.4613",
"0.46127498"
] |
0.79898983
|
0
|
Give a report on current projects, including installed databases and file sizes. Returns tuples of ``(project name, number of databases, size of all databases (GB))``.
|
def report(self):
from . import databases
_current = self.current
data = []
def get_dir_size(dirpath):
"""Modified from http://stackoverflow.com/questions/12480367/how-to-generate-directory-size-recursively-in-python-like-du-does.
Does not follow symbolic links"""
return sum(
sum(os.path.getsize(os.path.join(root, name)) for name in files)
for root, dirs, files in os.walk(dirpath)
)
names = sorted([x.name for x in self])
for obj in names:
self.set_current(obj, update=False, writable=False)
data.append((obj, len(databases), get_dir_size(projects.dir) / 1e9))
self.set_current(_current)
return data
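A minimal usage sketch (not part of the original module) for pretty-printing the tuples returned by report(); the `projects` manager instance referenced in the commented call is assumed to exist, as in the method above:

def print_report(report_rows):
    # report_rows: iterable of (project name, database count, size in GB)
    for name, n_databases, size_gb in sorted(report_rows, key=lambda row: -row[2]):
        print(f"{name:<30} {n_databases:>4} databases {size_gb:>8.2f} GB")

# print_report(projects.report())  # `projects` is the manager instance (assumed)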
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_project_stats(self, pool, project):\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting project stats: '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n avail = val['project']['space_available']\n return avail",
"def get_plant_stats(db_path: str) -> int:\n return get_db_count(db_path, 'company_data.db', 'plants')",
"def stats(self):\r\n\r\n self.downloads # explicitly call, so we have first/last upload data\r\n fmt = locale.nl_langinfo(locale.D_T_FMT)\r\n sep = lambda s: locale.format('%d', s, 3)\r\n val = lambda dt: dt and dt.strftime(fmt) or '--'\r\n\r\n params = (\r\n self.package_name,\r\n val(self.first_upload),\r\n self.first_upload_rel,\r\n val(self.last_upload),\r\n self.last_upload_rel,\r\n sep(len(self.releases)),\r\n sep(self.max()),\r\n sep(self.min()),\r\n sep(self.average()),\r\n sep(self.total()),\r\n )\r\n\r\n print \"\"\"PyPI Package statistics for: %s\r\n\r\n First Upload: %40s (%s)\r\n Last Upload: %40s (%s)\r\n Number of releases: %34s\r\n Most downloads: %35s\r\n Fewest downloads: %35s\r\n Average downloads: %35s\r\n Total downloads: %35s\r\n\"\"\" % params",
"def calculate_project_size(project_id=None):\n import traceback\n from models import ProjectScanReceipt\n if project_id is not None and not id_validator.match(project_id):\n raise ValueError(\"{0} is not a valid vidispine id\".format(project_id))\n\n #check if we have been disabled and if so just abort\n if not getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED\",False):\n logger.warning(\"Scan project {0}: Project scanning has been disabled, exiting\".format(project_id))\n return\n\n from projectsizer import update_project_size\n if project_id is not None:\n receipt = ProjectScanReceipt.objects.get(project_id=project_id)\n receipt.last_scan = datetime.now()\n receipt.save()\n start_time = time()\n #project ID of None=>unattached\n last_error = \"\"\n try:\n result = update_project_size(project_id)\n logger.info(\"{0}: Project size information: {1}\".format(project_id, result.storage_sum))\n result.save(project_id=project_id)\n except Exception as e:\n last_error = traceback.format_exc()\n logger.error(last_error)\n\n if project_id is not None:\n receipt = ProjectScanReceipt.objects.get(project_id=project_id)\n receipt.last_scan = datetime.now()\n receipt.last_scan_duration = time() - start_time\n receipt.last_scan_error = last_error\n receipt.save()\n\n logger.info(\"Done\")",
"def stats(self):\n ret = super(DiskCache, self).stats()\n ret[\"root\"] = (self.__env.stat(),)\n for name, database in self.__databases.items():\n with self.__env.begin(database, write=False) as txn:\n ret[name] = txn.stat(database)\n\n return ret",
"def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size",
"def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours",
"def launch_project_sizing():\n from queries import IN_PRODUCTION_NEED_SCAN, NEW_NEED_SCAN, OTHER_NEED_SCAN\n if not getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED\",False):\n logger.error(\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\")\n return \"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\"\n\n prioritise_old = getattr(settings,\"GNMPLUTOSTATS_PRIORITISE_OLD\",False)\n if prioritise_old:\n logger.warning(\"GNMPLUTOSTATS_PRIORITISE_OLD is set, will only focus on old projects\")\n\n trigger_limit = int(getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_LIMIT\",10))\n to_trigger = []\n c=0\n\n logger.info(\"Gathering projects to measure\")\n\n if not prioritise_old:\n highest_priority = IN_PRODUCTION_NEED_SCAN.order_by('last_scan')\n for entry in highest_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if not prioritise_old and len(to_trigger)<trigger_limit:\n next_priority = NEW_NEED_SCAN.order_by('last_scan')\n for entry in next_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if len(to_trigger)<trigger_limit:\n everything_else = OTHER_NEED_SCAN.order_by('last_scan')\n for entry in everything_else:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n logger.info(\"Projects to scan: \".format(to_trigger))\n if len(to_trigger)==0:\n if prioritise_old:\n logger.error(\"No projects to scan and GNMPLUTOSTATS_PRIORITISE_OLD is set. You should disable this now to pick up new projects\")\n logger.info(\"No projects need to be scanned right now\")\n\n n=0\n for entry in to_trigger:\n n+=1\n calculate_project_size.apply_async(kwargs={'project_id': entry.project_id},queue=getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_QUEUE\",\"celery\"))\n return \"Triggered {0} projects to scan\".format(n)",
"def summary(self):\n\t\tprint \"Summary--------------------------------------:\"\n\t\tprint \"Available data sources are:\"\n\t\tfor path in self.available_databases:\n\t\t\tprint path",
"def get_plant_family_stats(db_path: str) -> int:\n return get_db_count(db_path, 'company_data.db', 'plant_families')",
"def get_download_info(files):\n file_paths = [] # the files we need to check\n file_count = 0 # count of each file in files\n total_size = 0\n\n all_product_types = []\n for ring_obs_id in files:\n for product_type in files[ring_obs_id]:\n for f in files[ring_obs_id][product_type]:\n\n all_product_types.append(product_type)\n\n if product_type != 'preview_image':\n # this is a pds file not a browse product\n # collect the urls.. we will process these at the end\n file_paths += [f for f in files[ring_obs_id][product_type]] # list of all urls\n\n elif product_type == 'preview_image':\n # the file size of each preview images on disc is checked here\n # todo: OMG WHY WHAT\n # todo: get the file sizes into database instead = process like pds files and remove this whole section!\n\n from results.views import get_base_path_previews\n try:\n size = getsize(f)\n total_size += size\n file_count = file_count + 1\n except OSError:\n log.error('could not find file: ' + f)\n\n all_product_types = list(set(all_product_types)) # make unique\n # now we have all pds file_names, put all file names in a list and get their count\n if file_paths:\n\n file_names = list(set([ get_file_path(u) for u in file_paths]))\n file_count += len(file_names)\n\n # query database for the sum of all file_names size fields\n file_sizes = FileSizes.objects.filter(name__in=file_names, PRODUCT_TYPE__in=all_product_types).values('name','size','volume_id').distinct()\n total_size += sum([f['size'] for f in file_sizes]) # todo: this is here b/c django was not happy mixing aggregate+distinct\n\n return total_size, file_count # bytes",
"def get_db_info(self):\n total = 0\n info = {\n 'count': {},\n 'types': {}\n }\n for name in self._object_types:\n id, attrs, idx = self._object_types[name]\n info['types'][name] = {\n 'attrs': attrs,\n 'idx': idx\n }\n row = self._db_query_row('SELECT COUNT(*) FROM objects_%s' % name)\n info['count'][name] = row[0]\n total += row[0]\n\n info['total'] = total\n\n info['termcounts'] = {}\n for ivtidx in self._inverted_indexes:\n row = self._db_query_row('SELECT COUNT(*) FROM ivtidx_%s_terms' % ivtidx)\n info['termcounts'][ivtidx] = int(row[0])\n\n info['file'] = self._dbfile\n return info",
"def get_project_count(db):\n\n count = 0\n for element in db:\n count += 1\n return count",
"def build_progress_report(self):\n\n report = {\n 'packages' : self._packages_section(),\n 'metadata' : self._metadata_section(),\n 'publishing' : self._publishing_section(),\n }\n return report",
"def projects_summary(self, is_print=True):\n _projects_summary = []\n for _p in self.get_projects():\n # Retrieve the project stats\n _stats = self.http_call(\n \"get\", f\"{self.base_url}/projects/{_p['project_id']}/stats\"\n ).json()\n if is_print:\n print(\n f\"{_p['name']}: {_p['project_id']} -- Nodes: {_stats['nodes']} -- \"\n f\"Links: {_stats['links']} -- Status: {_p['status']}\"\n )\n _projects_summary.append(\n (\n _p[\"name\"],\n _p[\"project_id\"],\n _stats[\"nodes\"],\n _stats[\"links\"],\n _p[\"status\"],\n )\n )\n\n return _projects_summary if not is_print else None",
"def get_stats():\r\n stats = {\r\n \"progress_precent\": 100.0*finished_work_units_amount/work_units_amount,\r\n \"results\": None if work_status == Db.WorkStatusNames.finished_work.value else Db.collect_results(),\r\n #If it's already finished, then all the results were already sent to the main server.\r\n }\r\n return stats",
"def stats(request):\n stats = []\n activeProject = None\n activity = get_activity()\n if activity:\n activeProject = activity.project\n projects = Project.objects.filter(company__id=1).order_by('name')\n for project in projects:\n isCurrent = (activeProject != None) and (project.id == activeProject.id)\n # If this is NOT the currently selected project...\n if (not isCurrent) or (not activeProject):\n # If this project is password protected, skip it.\n if (project.password != None) and (len(project.password) > 0):\n continue\n sessions = Session.objects.filter(project=project,\n endtime__gt=F('starttime') +\n timedelta(minutes=3))\n files = File.objects.filter(project=project)\n fileactions = Fileaction.objects.filter(file__in=files)\n events = Event.objects.filter(session__in=sessions)\n sQuery = {\n 'avg': 'SUM(TIMESTAMPDIFF(SECOND, starttime, endtime)) / COUNT(*)',\n 'min': 'MIN(TIMESTAMPDIFF(SECOND, starttime, endtime))',\n 'max': 'MAX(TIMESTAMPDIFF(SECOND, starttime, endtime))',\n 'count': 'COUNT(*)'\n }\n sessions = sessions.extra(select=sQuery)\n sessions = sessions.values_list('avg', 'min', 'max', 'count').get()\n session_average_duration = 0\n session_min_duration = 0\n session_max_duration = 0\n if sessions[0] is not None:\n session_average_duration = int(sessions[0])\n if sessions[1] is not None:\n session_min_duration = int(sessions[1])\n if sessions[2] is not None:\n session_max_duration = int(sessions[2])\n session_count = sessions[3]\n statsdata = {\n 'selected': isCurrent,\n 'name': project.name,\n 'session_average_duration': session_average_duration,\n 'session_min_duration': session_min_duration,\n 'session_max_duration': session_max_duration,\n 'session_count': session_count,\n 'file_count': files.count(),\n 'fileaction_count': fileactions.count(),\n 'event_count': events.count()\n }\n stats.append(statsdata)\n return render_to_response(\n 'stats.html',\n {\n 'stats': stats,\n 'tab': 'stats'\n },\n context_instance=RequestContext(request)\n )",
"def dataStats(reportsDir = \"./reports/\"):\n legMulti = glob.glob(reportsDir+\"/leg/*.json\")\n legOne = glob.glob(reportsDir+\"/leg/oneproc/*.json\")\n legBroken = glob.glob(reportsDir+\"/leg/broken/*.json\")\n \n malMulti = glob.glob(reportsDir+\"/mal/*.json\")\n malOne = glob.glob(reportsDir+\"/mal/oneproc/*.json\")\n malBroken = glob.glob(reportsDir+\"/mal/broken/*.json\")\n \n print(\"\"\"Legal files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(legBroken+legMulti+legOne), len(legOne), len(legMulti), len(legBroken)))\n print(\"\"\"Malicious files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(malBroken+malMulti+malOne), len(malOne), len(malMulti), len(malBroken)))\n print(\"Working samples: {0}\".format(len(malMulti+malOne+legMulti+legOne)))",
"def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()",
"def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data",
"def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]",
"def get_resource_usages(self, project_id):\n try:\n # The API call does not give usage for keypair, fixed ips &\n # metadata items. Have raised a bug for that.\n limits = self.nova_client.limits.get(\n tenant_id=project_id).to_dict()\n resource_usage = collections.defaultdict(dict)\n resource_usage['ram'] = limits['absolute']['totalRAMUsed']\n resource_usage['cores'] = limits['absolute']['totalCoresUsed']\n resource_usage['instances'] = \\\n limits['absolute']['totalInstancesUsed']\n # If neutron is not enabled, calculate below resources from nova\n if self.no_neutron:\n resource_usage['security_groups'] = \\\n limits['absolute']['totalSecurityGroupsUsed']\n resource_usage['floating_ips'] = \\\n limits['absolute']['totalFloatingIpsUsed']\n # For time being, keypair is calculated in below manner.\n resource_usage['key_pairs'] = \\\n len(self.nova_client.keypairs.list())\n return resource_usage\n except exceptions.InternalError:\n raise",
"def required_free_space(self):\n sources = glob.glob(self.host_system_config['repos']['src'])\n repos_size = sum(map(utils.dir_size, sources))\n\n return {\n self.host_system_config['repos']['dst']: repos_size,\n self.repo_config_path: 10,\n }",
"def statistics(self):\n \n u_self = resource.getrusage(resource.RUSAGE_SELF)\n\tu_children = resource.getrusage(resource.RUSAGE_CHILDREN)\n\t\n\tpath = os.getenv('TMPDIR')\n\tif not path:\n\t path = os.getcwd()\n\t \n\tdisk = 0 \n\tfor root, dirs, files in os.walk(path): \n\t for d in dirs+files:\n\t disk += os.stat(os.path.join(root, d)).st_size\n\n return dict(\n\t cpu = u_self[0]+u_self[1]+u_children[0]+u_children[1],\n\t memory = (u_self[2]+u_children[2])*resource.getpagesize(),\n\t disk = disk,\n\t time = self.elapsed_time(),\n\t signal = self.signal\n\t)",
"def print_record_project_count(dataframe, dataset=\"full\"):\n if dataset == \"full\":\n print(\n \"For the ORIGINAL cleansed data, containing all available NYC capital \"\n \"projects change records:\\n\"\n )\n\n elif dataset == \"all\":\n print(\n \"For the data containing start and end data for all available \"\n \"NYC capital projects for the ENTIRE INTERVAL of changes \"\n \"covered in the ORIGINAL data:\\n\"\n )\n\n else:\n print(\n \"For the final {} data, containing the {} split of 3-year \"\n \"project data used in this analysis:\\n\".format(\n dataset.upper(), dataset\n )\n )\n\n # entries\n print(f\"\\tNumber of dataset records: {len(dataframe)}\")\n\n # num projects\n print(\n f\"\\tNumber of unique projects in dataset: {dataframe['PID'].nunique()}\\n\"\n )",
"def num_projects(self):\n return self._num_projects",
"def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))",
"def output_queue_size(self):\r\n results_dirname = get_param('results_dir')\r\n filename = os.path.join(results_dirname,\r\n '%s_%s' % (get_param('file_prefix'),\r\n 'queued_tasks'))\r\n queued_tasks_file = open(filename, 'w')\r\n queued_tasks_file.write('time\\ttotal_queued_tasks\\n')\r\n for time, queued_tasks in self.enqueued_tasks:\r\n queued_tasks_file.write('%s\\t%s\\n' % (time, queued_tasks))\r\n queued_tasks_file.close()",
"def status():\n used = get_space_used()\n avail = get_space_available()\n allowed = config.download.space_to_use\n print \"Space used by downloaded files: %.2f GB of %.2f GB (%.2f%%)\" % \\\n (used/1024.0**3, allowed/1024.0**3, 100.0*used/allowed)\n print \"Space available on file system: %.2f GB\" % (avail/1024.0**3)\n\n numwait = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='waiting'\", \\\n fetchone=True)\n numfail = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of requests waiting: %d\" % numwait\n print \"Number of failed requests: %d\" % numfail\n\n numdlactive = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='downloading'\", \\\n fetchone=True)\n numdlfail = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of active downloads: %d\" % numdlactive\n print \"Number of failed downloads: %d\" % numdlfail",
"def fullreport(self):\n print \"\"\n print \"Liten2 Full Reporting\"\n print \"--------------------------------------\"\n for getsize in self.size_searched():\n print \"File Size searched:\\t %s MB\" % self.humanvalue(getsize[0]) \n print \"Total MB wasted:\\t %s MB\" % self.totalmb()\n for i in self.file_num():\n print \"Files found over %s MB:\\t %s\" % (self.humanvalue(getsize[0]), i[0])\n for i in self.total_files():\n print \"Total files searched:\\t %s\" % i[0]\n for dup_count in self.count_dups():\n print \"\"\n print \"Total Duplicate files found:\\t %s\" % dup_count[0]\n print \"--------------------------------------\"\n for paths in self.path_dups():\n print paths[0]"
] |
[
"0.62372994",
"0.59644437",
"0.5955234",
"0.5950448",
"0.5801446",
"0.57668406",
"0.573293",
"0.5653337",
"0.56500965",
"0.56373423",
"0.5555827",
"0.55301195",
"0.54763895",
"0.5459824",
"0.54540616",
"0.54517734",
"0.5411624",
"0.53499293",
"0.53321147",
"0.5308962",
"0.5297413",
"0.52956265",
"0.528289",
"0.52744114",
"0.5255578",
"0.5255291",
"0.5245796",
"0.5244222",
"0.5218012",
"0.5202033"
] |
0.7339567
|
0
|
Function that generates a donor vector. If there are >= 3 search variables, the adaptive scaling factor is used; otherwise just the constant. It generates candidates for the donor vector by randomly choosing rows from the initial matrix, excluding the ith element. Parameters
|
def mutation(i,N_p,t,T,P,N_vars,F_min,F_const):
#Adaptive scaling factor
if N_vars >= 3:
F=F_min*2**np.exp(1-(T/(T+1-t)))
else:
F = F_const
#candidates are assigned without the i-th element
candidates= np.delete(np.arange(N_p), np.where(np.arange(N_p)==i))
#3 target vectors are picked out randomly for the donorvector generator
cand_rand=np.random.choice(candidates,3,replace= False)
X1=P[cand_rand[0],]
X2=P[cand_rand[1],]
X3=P[cand_rand[2],]
#Donor vector generator
V= X1 + F*(X2-X3)
return V
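A hedged, self-contained sketch (population size, bounds, and scaling-factor values below are illustrative assumptions, not taken from the original code) showing the mutation step applied across a small random population with the function defined above:

import numpy as np

N_p, N_vars = 10, 4                               # population size, number of search variables
T, t = 100, 25                                    # total and current generation
F_min, F_const = 0.2, 0.8                         # adaptive scaling-factor bound and constant
P = np.random.uniform(-5, 5, size=(N_p, N_vars))  # initial population matrix

# one donor vector per population member
donors = np.array([mutation(i, N_p, t, T, P, N_vars, F_min, F_const) for i in range(N_p)])
print(donors.shape)  # (N_p, N_vars)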
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gen_vector(size):\n solution = []\n for i in range(size):\n rand_num = uniform(-size, size)\n solution.append(rand_num)\n return np.array(solution)",
"def generateProbabilisticVector(size, empty):\n\n res = np.zeros(size)\n\n if(empty):\n\n #Generate the indexes to populate\n emptyVecIndexes = np.random.choice(2,size, p=[LINK_EMPTINESS_RATIO,1-LINK_EMPTINESS_RATIO])\n #Generate a random vector of the right size\n randomVec = np.random.random(size)\n\n #populate those vectors\n res[emptyVecIndexes==1] = randomVec[emptyVecIndexes==1]\n\n else:\n #Generate an uniform random vector of the right size\n res = np.random.uniform(0,1,size)\n\n #Normalize the res vector\n total = res.sum()\n if(total == 0):\n return res\n else:\n return res/total",
"def initialize_proximity_vector(size: int, seeds: List[int]) -> np.array:\r\n\r\n ## Generate a vector of zeros and set the seeds\r\n vector = np.zeros(size, dtype=np.double)\r\n vector[seeds] = 1.0\r\n\r\n ## If there are multiple seeds then equally weight them\r\n return (vector / vector.sum())",
"def get_repulsion_vector(item: str) -> np.ndarray:\n return self._centroids[np.random.choice(cluster_ids, p=sim_map[item])]",
"def get_donor_vec(self, donor_index):\n nl = NeighborList(natural_cutoffs(self), self_interaction=False, bothways=True)\n nl.update(self)\n # gets neighbors of donor atom and adds the vectors from neighbor to donor\n # for most donor atoms this is roughly in the proper binding direction\n donor_neighbors = nl.get_neighbors(donor_index)[0] # neighbor's index\n donor_vec = np.array([0, 0, 0])\n for i in donor_neighbors:\n a = self.get_distance(i, donor_index, vector=True)\n donor_vec = donor_vec + a\n if np.linalg.norm(donor_vec) == 0:\n warnings.warn(\"donor vector with magnitude 0 found, providing default vector\")\n return np.array([1, 0, 0])\n\n donor_vec = donor_vec / np.linalg.norm(donor_vec) # normalizes donor vec\n return donor_vec",
"def __init__(self, solution_length):\n self.solution_vector = [random.randint(0, 1) for i in range(solution_length)]",
"def WeightInitializer():\n return np.random.uniform(-1, 1)",
"def nonuniform_mutation(random, candidate, args):\r\n bounder = args['_ec'].bounder\r\n num_gens = args['_ec'].num_generations\r\n max_gens = args['max_generations']\r\n strength = args.setdefault('mutation_strength', 1)\r\n exponent = (1.0 - num_gens / float(max_gens)) ** strength\r\n mutant = copy.copy(candidate)\r\n for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):\r\n if random.random() <= 0.5:\r\n new_value = c + (hi - c) * (1.0 - random.random() ** exponent)\r\n else:\r\n new_value = c - (c - lo) * (1.0 - random.random() ** exponent)\r\n mutant[i] = new_value\r\n return mutant",
"def _generate_random_vector(size):\n return np.random.uniform(-0.1, 0.1, size)",
"def probchoice(V, d, obs=[]):\n\n #d = 0.01\n #obs = []\n #V = array([0., 0., 0.2, 0.2, 0.2, 0.4])\n\n #top = [exp(d*v) for v in V]\n top = exp(V * (1./d))\n\n #print top\n #print dummy\n\n # set the value of any prior observations to zero\n for i in range(len(obs)): top[obs[i][0]] = 0.\n\n bottom = sum(top)\n cp = [t/bottom for t in top]\n\n r = random()\n #print r\n #print cumsum(cp)\n\n return where((1*(r < cumsum(cp)))==1)[0][0]\n\n #return sum(1*(random() < cumsum(cp)))-1",
"def default_potential(pos, parameter): #define the potential\n mod_parameter = parameter[0]\n x0 = jnp.ones(pos.shape)\n return (1. - mod_parameter)*jnp.dot(pos, pos) + mod_parameter*jnp.dot(pos - x0, pos - x0)",
"def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])",
"def constructor(self, random, args):\r\n self._use_ants = True\r\n candidate = []\r\n while len(candidate) < len(self.weights) - 1:\r\n # Find feasible components\r\n feasible_components = []\r\n if len(candidate) == 0:\r\n feasible_components = self.components\r\n elif len(candidate) == len(self.weights) - 1:\r\n first = candidate[0]\r\n last = candidate[-1]\r\n feasible_components = [c for c in self.components if c.element[0] == last.element[1] and c.element[1] == first.element[0]]\r\n else:\r\n last = candidate[-1]\r\n already_visited = [c.element[0] for c in candidate]\r\n already_visited.extend([c.element[1] for c in candidate])\r\n already_visited = set(already_visited)\r\n feasible_components = [c for c in self.components if c.element[0] == last.element[1] and c.element[1] not in already_visited]\r\n if len(feasible_components) == 0:\r\n candidate = []\r\n else:\r\n # Choose a feasible component\r\n if random.random() <= self.bias:\r\n next_component = max(feasible_components)\r\n else:\r\n next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0]\r\n candidate.append(next_component)\r\n return candidate",
"def gen_small(s, n):\n\tdeg = n\n\tcoeff_vector = deg*[_sage_const_0 ]\n\tcoeff_vector[deg-_sage_const_1 ] = _sage_const_1 \n\tcoeff_vector[_sage_const_0 ] = _sage_const_1 \n\tindex_set = set({_sage_const_0 ,deg-_sage_const_1 })\n\tfor i in range(s-_sage_const_2 ):\n\t# add 1's\n\t\twhile True:\n\t\t\tindex1 = ZZ.random_element(_sage_const_1 ,deg-_sage_const_1 )\n\t\t\tif not index1 in index_set:\n\t\t\t\tcoeff_vector[index1] = _sage_const_1 \n\t\t\t\tindex_set = index_set.union({index1})\n\t\t\t\tbreak\n\t# add -1's\n\tfor i in range(s):\n\t\twhile True:\n\t\t\tindex2 = ZZ.random_element(_sage_const_1 ,deg-_sage_const_1 )\n\t\t\tif not index2 in index_set:\n\t\t\t\tcoeff_vector[index2] = -_sage_const_1 \n\t\t\t\tindex_set = index_set.union({index2})\n\t\t\t\tbreak\n\treturn coeff_vector",
"def initDE(N_p,lb,ub,prob):\n\n\n\n lb = np.full(N_p,lb)\n \n ub = np.full(N_p,ub)\n \n f = np.zeros((N_p,1)) #empty vector for fitness function\n \n fu = np.zeros((N_p,1))#newly created trial vector\n\n D = len(lb) # Determining amount of decision variables\n \n U = np.zeros((N_p,D)) #Matrix for storing trial solutions \n \n #Initial random population \n P = mat.repmat(lb,N_p,1)+mat.repmat((ub-lb),N_p,1)*np.random.rand(len(ub-lb),N_p)\n \n for p in np.arange(N_p):\n f[p]=prob(P[p,])\n \n return lb,ub,f,fu,D,U,P",
"def randvonmises(anglegram, i, kappa_scalar=8, random_state=1):\n np.random.seed(random_state)\n \n xtorsion = torch.linspace(-np.pi, np.pi, 36)\n \n vmexp = torch.sum(xtorsion * anglegram[0, 1:, 0, i])\n vmvar = torch.sum(anglegram[0, 1:, 0, i] * (xtorsion - vmexp) ** 2)\n \n vmkappa = 1 / vmvar\n \n randvar = vonmises.rvs(kappa=kappa_scalar * vmkappa, loc=vmexp)\n if randvar < -np.pi:\n randvar = 2 * np.pi + randvar\n elif randvar > np.pi:\n randvar = - 2 * np.pi + randvar\n return randvar",
"def parameter_proposal(w, k = 200, sigma = 3):\n noise = np.random.randn(k, *np.shape(w))\n return (np.expand_dims(w, 0) + sigma * noise, noise)",
"def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]",
"def construct_random_initial(self):\n x = np.random.random((self._crv_size, self._bound))\n return x",
"def default_variation(random, candidates, args):\r\n return candidates",
"def default_variation(random, candidates, args):\r\n return candidates",
"def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if (unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores",
"def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha",
"def gen_sol(self, start, n_candidats):\n sol = np.zeros(self.n_objets)\n poidrestant = self.W\n visited = set() # liste des objets visité\n # ajouter le premier objet\n r = rn.randint(1, poidrestant // self.poids[start])\n sol[start] = r\n poidrestant -= self.poids[start] * r\n gain = r * self.benifices[start]\n visited.add(start) # ajouter le debut a la liste civité\n\n # la liste candidates avec les pheromones mis a jours localement (0 sur les visited)\n candidats, pheromones = self.listeCandidate(\n self.pheromone, visited, n_candidats\n )\n\n for i in candidats:\n # Choisir le prochain objets parmi les candidats ainsi que le nombre\n move, nb = self.pick_move(\n pheromones, candidats, n_candidats, self.utilites, visited\n )\n toPop = candidats.index(move)\n candidats.pop(toPop)\n n_candidats -= 1\n np.delete(\n pheromones, toPop\n ) # rendre le pheromone à 0 pour indiquer qu'il a été visité\n\n # Mise a jour poidRestant et gain de la solution\n poidrestant -= self.poids[move] * nb\n while poidrestant < 0:\n nb -= 1\n poidrestant += self.poids[move]\n\n sol[move] = nb\n gain += nb * self.benifices[move]\n\n # ajouter l'objet a visited\n visited.add(move)\n # print(\"s\",i,sol,gain)\n return sol, gain, self.W - poidrestant",
"def sample_placements(mutated_params: torch.Tensor) -> torch.Tensor:\n # cumsum = torch.cumsum(torch.softmax(mutated_params, dim=-1), dim=-1)\n # noise = torch.rand(mutated_params.shape[0], mutated_params.shape[1], 1, requires_grad=False)\n # indices = torch.argmin((cumsum < noise).int(), dim=-1)\n indices = torch.distributions.Categorical(logits=mutated_params).sample()\n return indices",
"def generador_n(vector_v, constante):\n\n n = []\n\n for x in range(len(vector_v)):\n nn = vector_v[x] * constante\n n.append(nn)\n\n # print(\"valores v: \", vector_v)\n # print(\"valores n: \", n)\n\n return n",
"def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)",
"def random_choice(options, weights): \n r = random.random()\n for i, c in enumerate(cumsum(weights)):\n if r <= c:\n return options[i]",
"def random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=True):\n A = A.copy()\n edges_removed = []\n budget = np.max(budgets)\n results_info = []\n for i in range(budget):\n if (len(C) == 0):\n # Maximum balance achieved -> budget high.\n results_info = update_res(results_info, budgets, time.time() - start_time, len(x_v.nonzero()[0]) - len(S))\n break\n while (True):\n try:\n e_chosen = C[np.random.choice(range(len(C)))]\n except:\n results_info = update_res(results_info, budgets, time.time() - start_time, len(x_v.nonzero()[0]) - len(S))\n return results_info, np.nonzero(x_v)[0], A, edges_removed\n if (is_connected_postdel(delete_edge(A.copy(), e_chosen), e_chosen)):\n break\n else:\n C.remove(e_chosen)\n edges_removed.append(e_chosen)\n try:\n ue = node_out (x_v, e_chosen)\n except:\n print(e_chosen, \" is not on the periphery\")\n return\n A = delete_edge (A, e_chosen)\n x_v[ue] = find_label (A, ue, x_v)\n C.remove(e_chosen)\n if (verbose):\n print(e_chosen, \" is chosen\")\n if (x_v[ue] != 0):\n C, C_i = update_chosen(ue, x_v, A, C)\n # if (verbose):\n # print(\"Edges added to C: \", C_i)\n C = C + C_i\n if (len(edges_removed) in budgets):\n select_time = time.time() - start_time\n results_info.append({\"Budget\": len(edges_removed), \"RT\": select_time, \"Delta\": len(np.nonzero(x_v)[0]) - len(S)})\n if (verbose):\n print(\"\\n\")\n return results_info, np.nonzero(x_v)[0], A, edges_removed",
"def sel_random(individuals, size, replacement=False):\r\n if extra.is_numpy(individuals):\r\n return [np.random.choice(individuals, replace=replacement) for _ in range(size)]\r\n else:\r\n if replacement:\r\n return random.choices(individuals, k=size)\r\n else:\r\n return random.sample(individuals, k=size)"
] |
[
"0.5713445",
"0.5688836",
"0.5642662",
"0.55862236",
"0.5564165",
"0.5489675",
"0.5482834",
"0.54093033",
"0.53940886",
"0.53795874",
"0.5373351",
"0.5359693",
"0.53555995",
"0.5331755",
"0.53237486",
"0.5314485",
"0.52926415",
"0.5291193",
"0.5287574",
"0.5285245",
"0.5285245",
"0.52835983",
"0.5272407",
"0.5271727",
"0.5251521",
"0.5249041",
"0.5244896",
"0.52422124",
"0.52403396",
"0.5232295"
] |
0.64224416
|
0
|
Crossover function for differential evolution. This function uses an adaptive crossover rate whose minimum and maximum are set by the user. It decides whether or not to use the donor vector's jth element in the U matrix. Parameters
|
def crossover(f,P_c_min,P_c_max,i,D,V,P,U):
#ADAPTIVE Crossover
if f[i] < np.mean(f):
P_c = P_c_min + (P_c_max-P_c_min)*((f[i]-np.mean(f))/(np.max(f)-np.mean(f)))
else:
P_c = P_c_min
delta = np.random.randint(0,D-1)
for j in np.arange(D):
if np.random.uniform(0,1) <= P_c or delta == j:
U[i,j] = V[j]
else:
U[i,j]=P[i,j]
return U
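A hedged sketch (the sphere objective and all parameter values are illustrative assumptions, not part of the original code) of how the mutation and crossover functions above are typically combined with greedy selection over several differential-evolution generations:

import numpy as np

def sphere(x):
    # illustrative objective: minimise the sum of squares
    return float(np.sum(x ** 2))

N_p, D, T = 20, 4, 50                        # population size, dimensions, generations
P_c_min, P_c_max = 0.1, 0.9                  # adaptive crossover-rate bounds
F_min, F_const = 0.2, 0.8                    # scaling-factor bound and constant
P = np.random.uniform(-5, 5, size=(N_p, D))  # initial population
U = np.zeros_like(P)                         # trial-vector matrix
f = np.array([[sphere(P[i])] for i in range(N_p)])  # column vector of fitnesses

for t in range(T):
    for i in range(N_p):
        V = mutation(i, N_p, t, T, P, D, F_min, F_const)
        U = crossover(f, P_c_min, P_c_max, i, D, V, P, U)
    for i in range(N_p):                     # greedy selection: keep the trial vector if it improves
        fu = sphere(U[i])
        if fu <= f[i, 0]:
            P[i], f[i, 0] = U[i], fu

print("best fitness found:", f.min())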
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _cross_over(self,mp,cross_rate,eta):",
"def crossover(NN1, NN2, p_c, p_m):\n if np.random.choice([0, 1], p=[1-p_c, p_c]):\n return nn.mate_neural_nets(NN1, NN2, p_m)\n else:\n return np.random.choice([NN1, NN2])",
"def ciou(pred, target, eps=1e-7):\n # overlap\n lt = torch.max(pred[:, :2], target[:, :2])\n rb = torch.min(pred[:, 2:], target[:, 2:])\n wh = (rb - lt).clamp(min=0)\n overlap = wh[:, 0] * wh[:, 1]\n\n # union\n ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n union = ap + ag - overlap + eps\n\n # IoU\n ious = overlap / union\n\n # enclose area\n enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n cw = enclose_wh[:, 0]\n ch = enclose_wh[:, 1]\n\n c2 = cw**2 + ch**2 + eps\n\n b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n b2_x1, b2_y1 = target[:, 0], target[:, 1]\n b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n rho2 = left + right\n\n factor = 4 / math.pi**2\n v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n # CIoU\n cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))\n return cious",
"def CostFunction(self, out, V, P, params):\n u = self.u\n p = self.p\n puni = self.puni\n xd = self.xd\n xa = self.xa\n l = self.l\n Lagrange_Tracking = 0\n Lagrange_Regularisation = 0\n\n # input regularization\n for name in set(u.keys()):\n Lagrange_Regularisation += puni['weights',name][0]*ca.mtimes((u[name]-p['ref',name]).T,u[name]-p['ref',name])\n\n Lagrange_Regularisation += puni['weights','AoA']*out['AoA']**2\n Lagrange_Regularisation += puni['weights','sslip']*out['sslip']**2\n\n # --- Initialization tracking\n for name in set(xd.keys())- set(['R','E','Drag']):\n Lagrange_Tracking += puni['weights',name][0]*ca.mtimes((xd[name]-p['ref',name]).T,xd[name]-p['ref',name])\n for k in range(9):\n Lagrange_Tracking += ca.reshape(puni['weights','R'][0]*ca.mtimes((xd['R']-p['ref','R']).T,xd['R']-p['ref','R']),9,1)[k]\n\n\n Lagrange_Tracking = ca.Function('lagrange_track', [xd,xa,u,p,puni,l],[Lagrange_Tracking])\n Lagrange_Regularisation = ca.Function( 'lagrange_reg', [xd,xa,u,p,puni,l],[Lagrange_Regularisation])\n\n\n Tracking = 0\n Regularisation = 0\n\n\n for k in range(self.nk): # V['XA',k,0] is not same time step as V['Xd',k,0] but same result\n ftrack = Lagrange_Tracking(V['Xd',k,0], V['XA',k,0], V['U',k], P['p',k,0],P['puni'], V['l'])\n Tracking += ftrack\n\n freg = Lagrange_Regularisation(V['Xd',k,0], V['XA',k,0], V['U',k], P['p',k,0],P['puni'], V['l'])\n Regularisation += freg\n\n E_final = 10. * V['Xd',-1,-1,'E'] # for maximising final energy\n Tracking_Cost = (1-P['toggle_to_energy']) * Tracking #* 1e-3 # Tracking of initial guess\n Regularisation_Cost = Regularisation # Regularisation of inputs\n Lift_Cost = 0.5*V['vlift']**2 #* 1e2 # Regularisation of inputs\n Energy_Cost = P['toggle_to_energy'] * (E_final/params['sref'])/V['tf']\n SOSCFix = 10. * V['Xd',self.nk/4,0,'q',1]**2\n\n Cost = 0\n Cost = (Tracking_Cost + Regularisation_Cost + Lift_Cost + SOSCFix)/float(self.nk) + Energy_Cost\n\n return Cost",
"def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)",
"def differential_evolution(f, D_dimension, N_samples, min_bound, max_bound):\n #Multidimensional of D dimensions\n #in our case D= 3 for CIR Model\n \n Min_Bound = min_bound\n Max_Bound = max_bound\n \n #restrict the space to only positive doubles in (0,1)\n D = D_dimension\n N = N_samples*D\n #N=5\n \n CR = 0.9 #[0,1]\n F = 0.8 #(0,2)\n \n #generate N points\n generation_ = 0\n matrix = np.random.random((D,N))*Max_Bound\n \n full_list = np.arange(N)\n collecting_fitness_scores = np.zeros(N)\n while generation_ < 200:\n \n #print \"\\nAt Generation {}\".format(generation_)\n \n for i in range(matrix.shape[1]): #going across columns...N \n \n #print \"at column {}\".format(i)\n \n #randomly pick 3 indices from the subset list\n new_list = np.delete(full_list,i)\n x1,x2,x3 = np.random.choice(new_list,3)\n \n #Mutation Stage\n V = matrix[:,x1] + F * (matrix[:,x2] - matrix[:,x3]) #check bounds here\n V = checkBounds(V,Min_Bound,Max_Bound)\n \n \n U = np.zeros(len(V)) #make trial vector\n random_choice = np.random.choice(D)\n #Recombination Stage (Crossover) Forming Trial Vector\n for j in range(len(V)):\n s = np.random.uniform() #draw random uniform number in [0,1]\n if (s <= CR) or (j == random_choice): #if s <= CR then crossover\n U[j] = V[j]\n else:\n U[j] = matrix[j,i]\n #U[0] = V[0] #definite crossover\n \n# #Comparing Fitness Scores\n# fitness_trial_vec = f(U)\n# fitness_original = f(matrix[:,i]) \n# if fitness_trial_vec <= fitness_original:\n# matrix[:,i] = U\n# collecting_fitness_scores[i] = fitness_trial_vec\n# else:\n# collecting_fitness_scores[i] = fitness_original\n \n x1_satisfied = checkConditions(U)\n x2_satisfied = checkConditions(matrix[:,i])\n \n fitness_trial_vec = f(U)\n fitness_original = f(matrix[:,i])\n \n \n if x1_satisfied and x2_satisfied:\n if fitness_trial_vec < fitness_original:\n matrix[:,i] = U\n collecting_fitness_scores[i] = fitness_trial_vec\n else:\n collecting_fitness_scores[i] = fitness_original\n elif (x1_satisfied == True) and (x2_satisfied == False):\n matrix[:,i] = U\n collecting_fitness_scores[i] = fitness_trial_vec\n elif (x1_satisfied == False) and (x2_satisfied == True):\n collecting_fitness_scores[i] = fitness_original\n else:\n if fitness_trial_vec < fitness_original:\n matrix[:,i] = U\n collecting_fitness_scores[i] = fitness_trial_vec\n else:\n collecting_fitness_scores[i] = fitness_original\n \n \n \n #print collecting_fitness_scores\n \n \n generation_ += 1\n \n print \"smallest fitness score: {}\".format(np.min(collecting_fitness_scores))\n smallest = matrix[:,np.argmin(collecting_fitness_scores)]\n print \"smallest: {}\".format(smallest)\n \n return smallest",
"def general_cross_function(verbosity, function):\r\n ret = 1\r\n first_errors = [False, False]\r\n for count in range(10, 25, 5):\r\n for points in range(5, 10):\r\n for ax_c in range(3, 5):\r\n axes = []\r\n for _ in range(ax_c):\r\n axes.append(((np.random.random_sample() * 2), (3 + np.random.random_sample() * 4)))\r\n population = GeneticAlgorithms.random_population(count, points, axes) # assumes this works\r\n for _ in range(len(population)):\r\n rd1 = np.random.choice(population)\r\n rd2 = np.random.choice(population)\r\n crs = function(rd1, rd2)\r\n if crs.shape != rd1.shape:\r\n ret = 0\r\n if verbosity > 0 and first_errors[0]:\r\n first_errors[0] = True\r\n print(\"ERROR: cross function doesn't return correct shape\")\r\n for i in range(points):\r\n for j in range(ax_c):\r\n if crs[i][j] < min(rd1[i][j], rd2[i][j]) or crs[i][j] > max(rd1[i][j], rd2[i][j]):\r\n ret = 0\r\n if verbosity > 0 and first_errors[1]:\r\n first_errors[1] = True\r\n print(\"ERROR: cross function doesn't return in correct range\")\r\n return ret",
"def do_better_sweep(offset=None, direction=None, low=-1e3, high=1e3, return_upto_one=False,\n debug=False, debug2=False, known_T=None, run=run, return_scalar=False):\n debug = debug and CHEATING\n debug2 = debug2 and CHEATING\n\n if offset is None:\n offset = np.random.normal(0, 1, size=(DIM))\n if direction is None:\n direction = np.random.normal(0, 1, size=(DIM))\n\n def memo_forward_pass(x, c={}):\n if x not in c:\n c[x] = run((offset + direction * x)[np.newaxis, :])\n return c[x]\n\n relus = []\n\n def search(low, high):\n if debug:\n logger.log(\"low high\", low, high, level=Logger.DEBUG)\n mid = (low + high) / 2\n\n y1 = f_low = memo_forward_pass(low)\n f_mid = memo_forward_pass(mid)\n y2 = f_high = memo_forward_pass(high)\n\n if debug:\n ncross = cheat_num_relu_crosses((offset + direction * low)[np.newaxis, :],\n (offset + direction * high)[np.newaxis, :])\n logger.log(\"ncross\", ncross, level=Logger.DEBUG)\n\n if debug:\n logger.log(\"aa\", f_mid, f_high, f_low, level=Logger.DEBUG)\n logger.log(\"compare\", np.abs(f_mid - (f_high + f_low) / 2), SKIP_LINEAR_TOL * ((high - low) ** .5),\n level=Logger.DEBUG)\n logger.log(\"really\", ncross, level=Logger.DEBUG)\n\n if np.abs(f_mid - (f_high + f_low) / 2) < SKIP_LINEAR_TOL * ((high - low) ** .5):\n # We're in a linear region\n if debug:\n logger.log(\"Skip linear\", sum(ncross), ncross, level=Logger.DEBUG)\n return\n elif high - low < 1e-8:\n # Too close to each other\n if debug:\n logger.log(\"wat\", ncross, level=Logger.DEBUG)\n return\n else:\n # Check if there is exactly one ReLU switching sign, or if there are multiple.\n # To do this, use the 2-linear test from Jagielski et al. 2019\n #\n # \n # /\\ <---- real_h_at_x\n # / \\\n # / \\\n # / \\\n # / \\\n # / \\\n # / \\\n # low q1 x_s_b q3 high\n # \n # Use (low,q1) to estimate the direction of the first line\n # Use (high,q3) to estimate the direction of the second line\n # They should in theory intersect at (x_should_be, y_should_be)\n # Query to compute real_h_at_x and then check if that's what we get\n # Then check that we're linear from x_should_be to low, and\n # linear from x_should_be to high.\n # If it all checks out, then return the solution.\n # Otherwise recurse again.\n\n q1 = (low + mid) * .5\n q3 = (high + mid) * .5\n\n f_q1 = memo_forward_pass(q1)\n f_q3 = memo_forward_pass(q3)\n\n m1 = (f_q1 - f_low) / (q1 - low)\n m2 = (f_q3 - f_high) / (q3 - high)\n\n if m1 != m2:\n d = (high - low)\n alpha = (y2 - y1 - d * m2) / (d * m1 - d * m2)\n\n x_should_be = low + (y2 - y1 - d * m2) / (m1 - m2)\n height_should_be = y1 + m1 * (y2 - y1 - d * m2) / (m1 - m2)\n\n if m1 == m2:\n # If the slopes on both directions are the same (e.g., the function is flat)\n # then we need to split and can't learn anything\n pass\n elif np.all(.25 + 1e-5 < alpha) and np.all(alpha < .75 - 1e-5) and np.max(x_should_be) - np.min(\n x_should_be) < 1e-5:\n x_should_be = np.median(x_should_be)\n real_h_at_x = memo_forward_pass(x_should_be)\n\n if np.all(np.abs(real_h_at_x - height_should_be) < SKIP_LINEAR_TOL * 100):\n # Compute gradient on each side and check for linearity\n\n eighth_left = x_should_be - 1e-4\n eighth_right = x_should_be + 1e-4\n grad_left = (memo_forward_pass(eighth_left) - real_h_at_x) / (eighth_left - x_should_be)\n grad_right = (memo_forward_pass(eighth_right) - real_h_at_x) / (eighth_right - x_should_be)\n\n if np.all(np.abs(grad_left - m1) > SKIP_LINEAR_TOL * 10) or np.all(\n np.abs(grad_right - m2) > SKIP_LINEAR_TOL * 10):\n if debug:\n logger.log(\"it's nonlinear\", 
level=Logger.DEBUG)\n pass\n else:\n\n if debug:\n logger.log(\"OK\", ncross, level=Logger.DEBUG)\n vals = cheat_get_inner_layers((offset + direction * x_should_be))\n smallest = min([np.min(np.abs(v)) for v in vals])\n logger.log(\"Small\", smallest, vals, level=Logger.DEBUG)\n if smallest > .01:\n raise\n if debug and sum(ncross) > 1:\n logger.log(\"BADNESS\", level=Logger.DEBUG)\n if return_scalar:\n relus.append(x_should_be)\n else:\n relus.append(offset + direction * x_should_be)\n return\n\n search(low, mid)\n if return_upto_one and len(relus) > 0:\n # we're done because we just want the left-most solution; don't do more searching\n return\n search(mid, high)\n\n if debug2 or debug:\n logger.log(\"Sweeping\", cheat_num_relu_crosses((offset + direction * low)[np.newaxis, :],\n (offset + direction * high)[np.newaxis, :]), level=Logger.DEBUG)\n\n # If we know what some of the earlier layers look like, then don't waste compute\n # to recover those early layer critical points again.\n # Just find the ones on the deeper layers and then add the early-layer ones in\n # (where they should be).\n # WARNING: this assumes that known_T is high precision. If it is not, then\n # it will add in the WRONG locations and that is very bad.\n if known_T is not None and False:\n def fwd(x):\n return np.sum(known_T.forward(x, with_relu=True), axis=1)\n\n prev_solns = do_better_sweep(offset, direction, low, high, run=fwd,\n return_scalar=True)\n prev_solns = [low] + prev_solns + [high]\n for l, h in zip(prev_solns, prev_solns[1:]):\n search(l, h)\n if h != high:\n relus.append(offset + direction * h)\n return relus\n\n search(low,\n high)\n\n return relus",
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def smart_clause_crossover_dispatch(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None, use_infeasibility=False):\n if use_infeasibility:\n smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)\n else:\n smart_clause_crossover(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)",
"def simulated_binary_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n di = args.setdefault('sbx_distribution_index', 10)\n bounder = args['_ec'].bounder\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\n try:\n if m > d:\n m, d = d, m\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\n u = random.random() \n if u <= (1.0 / alpha):\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\n else:\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\n bro_val = max(min(bro_val, ub), lb) \n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\n sis_val = max(min(sis_val, ub), lb)\n if random.random() > 0.5:\n bro_val, sis_val = sis_val, bro_val\n bro[i] = bro_val\n sis[i] = sis_val\n except ZeroDivisionError:\n # The offspring already have legitimate values for every element,\n # so no need to take any special action here.\n pass\n return [bro, sis]\n else:\n return [mom, dad]",
"def autocov(x, **kwargs):\n\t# only remove the mean once, if needed\n\tdebias = kwargs.pop('debias', True)\n\taxis = kwargs.get('axis', -1)\n\tif debias:\n\t\tx = _remove_bias(x, axis)\n\tkwargs[ 'debias' ] = False\n\treturn crosscov(x, x, **kwargs)",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n new_cxpb=cxpb/(cxpb+mutpb)\n new_mutpb=mutpb/(cxpb+mutpb)\n \n #num_cx=int(new_cxpb*len(offspring))\n #num_mu=len(offspring)-num_cx\n #print(new_cxpb, new_mutpb)\n # Apply crossover and mutation on the offspring\n i = 1\n while i < len(offspring):\n if random.random() < new_cxpb:\n if (offspring[i - 1] == offspring[i]):\n offspring[i - 1], = toolbox.mutate(offspring[i - 1])\n offspring[i], = toolbox.mutate(offspring[i])\n else:\n offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])\n del offspring[i - 1].fitness.values, offspring[i].fitness.values\n i = i + 2\n else:\n offspring[i], = toolbox.mutate(offspring[i])\n del offspring[i].fitness.values\n i = i + 1\n return offspring",
"def c_test__cross_inp(self, old_population, population_weighting, run_locals):\r\n return 1",
"def gradient(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\n\n\t## Initial parameters\n\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], obs[:, 1], obs[:, 2]\n\n\t### Define normal triad and proper motions\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\n\t### Eq. 8 in Lindegren+2000 (Covariance Matrix)\n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\n\t### Eq. 16 in Lindegren+2000 (Definition of D matrix)\t\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v*parallax[:]/_A)**2., (sigma_v*parallax[:]/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t### First derivatives in Eq. A3 \n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. \n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See Eq. A5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See Eq. 
A6\n\tdG_dpi, dG_dsigmav = np.zeros((3,3,N)), np.zeros((3,3,N)) \n\t\n\tdG_dpi[0,0,:], dG_dpi[0,1,:], dG_dpi[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[1,0,:], dG_dpi[1,1,:], dG_dpi[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[2,0,:], dG_dpi[2,1,:], dG_dpi[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dpi[:]\n\t\n\n\tdG_dsigmav[0,0,:], dG_dsigmav[0,1,:], dG_dsigmav[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[1,0,:], dG_dsigmav[1,1,:], dG_dsigmav[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[2,0,:], dG_dsigmav[2,1,:], dG_dsigmav[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dsigmav[:]\n\n\tf_dpi = np.zeros((N), dtype=np.float64) \n\t\n\t\n\tfor i in range(N):\n\t\tf_dpi_1, f_dpi_3 = 0., 0.0 \n\t\tfor ia in range(3):\n\t\t\tfor ib in range(3):\n\t\t\t\tf_dpi_1 += invD[ia,ib,i]*cprime_pi[ia,i]*a_c[ib][i]\n\t\t\t\tf_dpi_3 += (-0.5)*(dG_dpi[ia,ib,i]*a_c[ia][i]*a_c[ib][i])\n\t\t\t\t\t\n\t\tf_dpi_2 = (-0.5)*dlnd_dpi[i]\n\t\tf_dpi[i] = f_dpi_1 + f_dpi_2 + f_dpi_3\n\t\t\n\n\tf_vx, f_vy, f_vz, f_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) \n\n\tf_vx = np.sum(invD[0,0,:]*cprime_vx[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vx[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vx[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vx[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vx[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vx[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vx[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vx[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vx[2,:]*a_c[2][:])\n\t\n\tf_vy = np.sum(invD[0,0,:]*cprime_vy[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vy[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vy[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vy[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vy[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vy[1][:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vy[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vy[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vy[2,:]*a_c[2][:])\n\n\tf_vz = np.sum(invD[0,0,:]*cprime_vz[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vz[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vz[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vz[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vz[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vz[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vz[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vz[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vz[2,:]*a_c[2][:])\n\t\n\tf_sigmav = 
np.sum(-0.5*(dG_dsigmav[0,0,:]*a_c[0][:]*a_c[0][:] + dG_dsigmav[0,1,:]*a_c[1][:]*a_c[0][:]+ dG_dsigmav[0,2,:]*a_c[2][:]*a_c[0][:] + \\\n\t\t dG_dsigmav[1,0,i]*a_c[1][:]*a_c[0][:] + dG_dsigmav[1,1,:]*a_c[1][:]*a_c[1][:]+ dG_dsigmav[1,2,:]*a_c[1][:]*a_c[2][:] + \t\n\t\t dG_dsigmav[2,0,i]*a_c[2][:]*a_c[0][:] + dG_dsigmav[2,1,:]*a_c[2][:]*a_c[1][:]+ dG_dsigmav[2,2,:]*a_c[2][:]*a_c[2][:]))\n\t\n\n\tf_sigmav = f_sigmav - 0.5*np.sum(dlnd_dsigmav)\t\n\tf = np.concatenate((f_dpi, np.array([f_vx, f_vy, f_vz, f_sigmav]))) ### Grad L(theta), see Eq. 17\n\treturn -2.*f \t\t\t\t\t\t ### Grad U(theta), see Eq. 18",
"def mutation(i,N_p,t,T,P,N_vars,F_min,F_const):\n\n #Adaptive scaling factor\n if N_vars >= 3:\n F=F_min*2**np.exp(1-(T/(T+1-t)))\n else:\n F = F_const\n #candidates are assigned without the i-th element\n candidates= np.delete(np.arange(N_p), np.where(np.arange(N_p)==i))\n #3 target vectors are picked out randomly for the donorvector generator\n cand_rand=np.random.choice(candidates,3,replace= False)\n X1=P[cand_rand[0],]\n X2=P[cand_rand[1],]\n X3=P[cand_rand[2],]\n \n\t#Donorvctor generator\n V= X1 + F*(X2-X3)\n return V",
"def simulated_binary_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n di = args.setdefault('sbx_distribution_index', 10)\r\n bounder = args['_ec'].bounder\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\r\n try:\r\n if m > d:\r\n m, d = d, m\r\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\r\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\r\n u = random.random() \r\n if u <= (1.0 / alpha):\r\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\r\n else:\r\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\r\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\r\n bro_val = max(min(bro_val, ub), lb) \r\n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\r\n sis_val = max(min(sis_val, ub), lb)\r\n if random.random() > 0.5:\r\n bro_val, sis_val = sis_val, bro_val\r\n bro[i] = bro_val\r\n sis[i] = sis_val\r\n except ZeroDivisionError:\r\n # The offspring already have legitimate values for every element,\r\n # so no need to take any special action here.\r\n pass\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def gradient_of_selection(f_jk,benefit_function,b,c=1,*params):\n f_jk['gos'] = f_jk.A*(b*benefit_function(f_jk.j+1,f_jk.k+1,*params)-c) - f_jk.B*b*benefit_function(f_jk.j,f_jk.k+1,*params)\n gos = f_jk.groupby('n')['gos'].sum() \n gos = gos*(Z-gos.index.values)*gos.index.values/(Z**2)*DELTA\n gos.loc[0]=gos.loc[100]=0\n return gos",
"def autocov(x, **kwargs):\r\n # only remove the mean once, if needed\r\n debias = kwargs.pop('debias', True)\r\n axis = kwargs.get('axis', -1)\r\n if debias:\r\n x = remove_bias(x, axis)\r\n kwargs['debias'] = False\r\n return crosscov(x, x, **kwargs)",
"def dual_equal(s, p, g_x_func, Z_0, k=None, P=None, T=None, \n tol=1e-2):\n from numpy import array, zeros_like, zeros\n from scipy.optimize import minimize\n from tgo import tgo\n \n def x_lim(X): # limiting function used in TGO\n import numpy\n return numpy.sum(X, axis=1) - 1.0 <= 0.0\n \n if k == None:\n k = p.m['Valid phases']\n \n # Initialize\n Z_0 = array(Z_0)\n LBD = - 1e300 # -inf\n s.update_state(s, p, X = Z_0, phase = k, Force_Update=True) \n \n # G_p (Primal problem Z_0_i - x_i = 0 for all i):\n UBD = g_x_func(s, p).m['g_mix']['t'] \n Lambda_d = zeros_like(Z_0)\n\n # X bounds used in UBD optimization\n X_bounds = [zeros(shape=(p.m['n']-1)), # Upper bound\n zeros(shape=(p.m['n']-1)) # Lower bound\n ] \n for i in range(p.m['n']-1): \n Sigma_ind = 0.0 # Sum of independent components excluding i\n # (lever rule) \n for j in range(p.m['n']-1):\n if j != i:\n Sigma_ind += Z_0[j]\n\n X_bounds[0][i] = 1.0 - Sigma_ind \n \n # Construct physical bounds x \\in [0, 1] for all independent components\n # (used in differential_evolution)\n Bounds = []\n for i in range(p.m['n'] - 1):\n #Bounds.append((0, 1))\n Bounds.append((1e-5, 0.99999))\n \n # Construct composition container from X_d for all phases. (REMOVED)\n X_d = Z_0\n\n\n #%% Normal calculation of daul problem if Z_0 is unstable.\n while abs(UBD - LBD) >= tol:\n # Solve UBD\n # Update system to new composition.\n # (Comp. invariant in UBD)\n s.update_state(s, p, X = X_d , phase = k, Force_Update=True) \n Lambda_sol = minimize(ubd, Lambda_d, method='L-BFGS-B', \n args=(g_x_func, X_d, Z_0, s, p, X_bounds,\n k))['x']\n \n Lambda_d = array(Lambda_sol) # If float convert back to 1x1 array\n \n # NOTE: NEGATIVE THE MAX DEFINED PROBLEM:\n UBD = -ubd(Lambda_d, g_x_func, X_d, Z_0, s, p, X_bounds, k)\n\n X_sol = tgo(lbd, Bounds, args=(g_x_func, Lambda_d, Z_0, s, p, k),\n g_func=x_lim,\n n = 100,\n skip=2)\n # Solve LBD \n LBD = lbd(X_sol, g_x_func, Lambda_d, Z_0, s, p, k) \n X_d = X_sol\n # End\n \n\n if False: # Print results optional\n print 'Final UBD = {}'.format(UBD)\n print 'Final LBD = {}'.format(LBD)\n print 'Final UBD - LBD = {}'.format(UBD - LBD)\n print 'Final Z_eq = {}'.format(X_d)\n print 'Final Lambda_d = {}'.format(Lambda_d)\n \n # Returns\n s.m['Z_eq'] = X_d\n s.m['Lambda_d'] = Lambda_d\n return s",
"def monotonic_contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n xpts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n # compute automatic differentiated curvature:\n qxdot = np.dot(xpts,self.localBasis[1,:])\n qxddot = np.dot(xpts,self.localBasis[2,:])\n qydot = np.dot(ypts,self.localBasis[1,:])\n qyddot = np.dot(ypts,self.localBasis[2,:]) \n #computation of doubledots is expanded below\n \n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = ((np.sqrt(qxdot*qxdot + qydot*qydot) )**3. )*constraint\n lhs = (np.sqrt(qxdot**2 + qydot**2)**3.) *constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n #if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n if qxdot.contains(0.):\n print 'qxdot = ',qxdot\n print 'qxdot not invertable, implement other logic please'\n else:\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n t1 = (lhs + qydot*qxddot)/(qxdot)#*(qxdot**-1.)\n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (t1 - ypts[j]*float(self.localBasis[2,j])) + min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n \n ## \n ## qxdot\n ##\n \n if qyddot.contains(0.):\n print 'qyddot = ',qyddot\n print 'qyddot not invertable, implement other logic please'\n else:\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + qydot*qxddot)/(qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (fix - xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n #if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n if qxddot.contains(0.):\n print 'qxddot = ',qxddot\n print 'qxddot not invertable, implement other logic please'\n else:\n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - qxdot*qyddot)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n 
pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (t1 - ypts[j]*float(self.localBasis[1,j])) + min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n \n ##contract on qxdot\n \n \n #contract on qxddot\n if qydot.contains(0.):\n print 'qydot = ',qxddot\n print 'qydot not invertable, implement other logic please'\n else:\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n \n min_ans = (fix - xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n \n \n for i in range(nrange):\n vertices[0][i].value = xpts[i]\n vertices[1][i].value = ypts[i]\n \n return vertices",
"def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]\n ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]\n all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints\n ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]\n ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]\n all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints\n ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors\n all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors\n\n ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])\n ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])\n # num_hard = random.choice([ind1_num_hard, ind2_num_hard])\n if ind1_num_hard <= ind2_num_hard:\n num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))\n else:\n num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))\n num_soft = len(ind1) - num_hard\n chosen_hard_clauses = []\n chosen_hard_clause_indices = []\n chosen_soft_clauses = []\n chosen_soft_clause_indices = []\n\n # Choose hard constraints\n for i in range(0, num_hard):\n if i == 0:\n combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors\n else:\n combined_hard_coverage_bitvectors = [combine_coverage_bitvectors_hard_constraints(\n chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_hard_clause_indices:\n for j in range(len(combined_hard_coverage_bitvectors)):\n if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:\n combined_hard_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]\n best_hard_coverage = max(combined_hard_coverages)\n best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]\n chosen_hard_clause_index = random.choice(best_hard_indices)\n else:\n coverages = [sum(x) for x in combined_hard_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n 
elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]\n if chosen_hard_clause_index < len(ind1_hard_constraints):\n chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]\n else:\n chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]\n\n chosen_hard_clauses.append(chosen_hard_clause)\n chosen_hard_clause_indices.append(chosen_hard_clause_index)\n chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector\n\n # Choose soft constraints\n for i in range(0, num_soft):\n if i == 0:\n combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors\n else:\n combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(\n chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_soft_clause_indices:\n for j in range(len(combined_soft_coverage_bitvectors)):\n if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:\n combined_soft_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]\n best_soft_coverage = max(combined_soft_coverages)\n best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]\n chosen_soft_clause_index = random.choice(best_soft_indices)\n else:\n coverages = [sum(x) for x in combined_soft_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]\n if chosen_soft_clause_index < len(ind1_soft_constraints):\n chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]\n else:\n chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]\n\n chosen_soft_clauses.append(chosen_soft_clause)\n chosen_soft_clause_indices.append(chosen_soft_clause_index)\n chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector\n\n for i in range(len(chosen_hard_clauses)):\n hard_clause = chosen_hard_clauses[i]\n # We can safely set ind1 and ind2 to the same computed 
smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = hard_clause\n ind2[i] = hard_clause\n\n for i in range(len(chosen_soft_clauses)):\n soft_clause = chosen_soft_clauses[i]\n ind1[num_hard+i] = soft_clause\n ind2[num_hard+i] = soft_clause",
"def main(N,N_p,T,lb,ub,prob,N_vars,F_min,F_const,P_c_min,P_c_max):\n\n lb,ub,f,fu,D,U,P = initDE(N_p,lb,ub,prob)\n if N_p < 4:\n raise Exception(\"Sorry, there must be atleast a population of 4. Reccomended 20\")\n for t in np.arange(T):\n for i in np.arange(N_p):\n V = mutation(i,N_p,t,T,P,N_vars,F_min,F_const)\n\n U=crossover(f,P_c_min,P_c_max,i,D,V,P,U)\n\n for j in np.arange(N_p): \n N,f,P = boundgreed(N,j,U,P,f,fu,ub,lb,prob)\n\t\n\t\t#if N == 500:\n\t\t\t#break\n best_of_f= min(f)\n globopt = P[f.argmin()]\n return N,best_of_f, globopt[:N_vars]",
"def limit_accel_in_turns(v_ego, angle_steers, a_target, CP, angle_later):\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * abs(angle_steers) * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_y2 = v_ego**2 * abs(angle_later) * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = a_total_max - a_y\n a_x_allowed2 = a_total_max - a_y2\n\n a_target[1] = min(a_target[1], a_x_allowed, a_x_allowed2)\n a_target[0] = min(a_target[0], a_target[1])\n #print a_target[1]\n return a_target",
"def crossover(x, y):\n return x[-1] > y[-1] and x[-2] < y[-2]",
"def set_c_values(self,nu=3,xd=2,a12sq=1,a13sq=1,a1=0,b1=0,p1=0,\n vx=0,vy=0,vz=0,epsilon=0.,omega=1.5*np.pi,\n a2=np.pi,b2=0*np.pi,p2=0.5*np.pi,silent=False):\n \n c_target = np.zeros(18)\n c_target[0] = nu*self.sigma0_RG \n xd_abs = xd*self.sigma2_RG\n c_target[4:10] = set_fGij(xd_abs,a12sq,a13sq,a1,b1,p1)\n c_target[10:13] = np.array([vx,vy,vz])\n c_target[13:18] = set_VGij(epsilon,omega,a2,b2,p2)\n \n if silent == False:\n print (\"Constrain peak parameters: \")\n if 'f0' in self.CONS or 'full' in self.CONS: \n print (\"f0: \",\"nu = %.1f\"%nu, \"$\\sigma_0$\")\n if 'f1' in self.CONS or 'full' in self.CONS: \n print (\"f1: \",\"f1,x = f1,y = f1,z = 0\")\n if 'f2' in self.CONS or 'full' in self.CONS: \n print (\"f2: \",r\"xd = {:.1f} $\\sigma_2$, a12sq = {:.1f}, a13sq = {:.1f},a1={:.2f}, b1={:.2f}, p1={:.2f}\".format(xd,a12sq,a13sq,a1,b1,p1))\n if 'vx' in self.CONS or 'full' in self.CONS: \n print (\"vx = {:.1f} km/s\".format(vx)) \n if 'vy' in self.CONS or 'full' in self.CONS: \n print (\"vy = {:.1f} km/s\".format(vy))\n if 'vz' in self.CONS or 'full' in self.CONS: \n print (\"vz = {:.1f} km/s\".format(vz))\n if 'TG' in self.CONS or 'full' in self.CONS: \n print (\"TG: \",\"epsilon = {:.1f} km/s/Mpc, omega = {:.2f}, a2={:.2f}, b2={:.2f}, p2={:.2f}\".format(epsilon,omega,a2,b2,p2))\n \n return c_target[self.cmask]",
"def solve(targets, \n payoff,\n defender_resources:int=1, \n attacker_resources:int=1, \n ptype:str=\"MILP\", \n minimax:str=\"maximize\"):\n # Need a big number. Will lower bound later\n M = 9999\n\n p = cplex.Cplex()\n if ptype in (\"milp\", \"MILP\"):\n p.set_problem_type(cplex.Cplex.problem_type.MILP)\n else:\n print(\"Problem type:\",ptype,\"is not currently supported\")\n exit(1)\n\n if minimax in (\"max\",\"maximize\"):\n p.objective.set_sense(p.objective.sense.maximize)\n elif minimax in (\"min\",\"minimize\"):\n p.objective.set_sense(p.objective.sense.minimize)\n else:\n print(\"Only solves maximization or minimization problems\")\n\n num_targets = len(targets)\n # v is the z's, x's, v_def, and v_att\n v = [\"z\"+str(t) for t in range(num_targets)] \\\n + [\"x\"+str(t) for t in range(num_targets)] \\\n + [\"v_def\",\"v_att\"] \n num_variables = len(v)\n obj = np.zeros(num_variables)\n for i in range(num_variables):\n if v[i] == \"v_def\":\n obj[i] = 1.\n lb = np.zeros(num_variables)\n ub = np.ones(num_variables)\n for i in range(num_variables):\n if v[i] in (\"v_def\",\"v_att\"):\n ub[i] = cplex.infinity\n lb[i] = -1*cplex.infinity\n\n p.variables.add(obj = obj, # Objective function\n lb = lb, # Lower bound\n ub = ub, # Upper bound\n names = v) # Variable names\n # z_i \\in {0,1} Set all z_i to integer values\n [p.variables.set_types([(\"z\"+str(t),p.variables.type.integer)]) for t in range(num_targets)]\n # x_i \\in [0,1] Set all x_i to continuous values\n [p.variables.set_types([(\"x\"+str(t),p.variables.type.continuous)]) for t in range(num_targets)]\n # Also set for attacker and defender\n p.variables.set_types([(\"v_def\",p.variables.type.continuous)])\n p.variables.set_types([(\"v_att\",p.variables.type.continuous)])\n\n util_du = [M+payoff[i][2] for i in range(num_targets)]\n util_dc = [payoff[i][3] for i in range(num_targets)]\n util_ac = [M+payoff[i][3] for i in range(num_targets)]\n init_params = np.array([1.,defender_resources])\n rhs = np.hstack((init_params, util_du, util_dc, util_ac))\n\n senses = [\"E\",\"L\"] \\\n + [\"L\" for i in range(num_targets)] \\\n + [\"G\" for i in range(num_targets)]\\\n + [\"L\" for i in range(num_targets)]\n \n\n constraints = []\n zl = []\n zc = []\n xl = []\n xc = []\n for t in range(num_targets):\n zl.append(\"z\"+str(t))\n zc.append(1.)\n xl.append(\"x\"+str(t))\n xc.append(1.)\n constraints.append([zl,zc])\n constraints.append([xl,xc])\n\n # Defender's utility\n # Interleave vars and coefficients\n # Easier doing it this way that inline loops\n def_util_vars = []#np.zeros(num_targets*3)\n def_util_coef = []#np.zeros(num_targets*3)\n def_util = []\n for i in range(num_targets):\n def_util_vars = ([\"v_def\", \"x\"+str(i), \"z\"+str(i)])\n def_util_coef = ([1., (payoff[i][2] - payoff[i][1]), M])\n constraints.append([def_util_vars, def_util_coef])\n\n\n\n # Attacker strats\n att_strat_vars = []\n att_strat_coef = []\n att_strat = []\n for i in range(num_targets):\n att_strat_vars = ([\"v_att\", \"x\"+str(i)])\n att_strat_coef = ([1., payoff[i][3] - payoff[i][4]])\n constraints.append([att_strat_vars,att_strat_coef])\n\n\n # Attacker utility\n att_util_vars = []\n att_util_coef = []\n att_util = []\n for i in range(num_targets):\n att_util_vars = ([\"v_att\", \"x\"+str(i), \"z\"+str(i)])\n att_util_coef = ([1., payoff[i][3] - payoff[i][4], M])\n constraints.append([att_util_vars, att_util_coef])\n\n # Throw them all together\n constraint_names = [\"r\"+str(i) for i in range(len(constraints))]\n\n p.linear_constraints.add(lin_expr = 
constraints,\n senses = senses,\n rhs = rhs,\n names = constraint_names)\n p.solve()\n return p.solution.get_values()",
"def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos",
"def contracting_channel_cross(m, n, W_upstream = 1., W_downstream = 0.75,\n L_1 = 5.0, L_2 = 2.0, L_3 = 10, origin = (0.0, 0.0)):\n\n import math\n\n from anuga.config import epsilon\n\n\n lenx = L_1 + L_2 + L_3\n leny = W_upstream\n deltax = lenx/float(m)\n deltay = leny/float(n)\n\n x1 = 0\n y1 = 0\n x2 = L_1\n y2 = 0\n x3 = L_1 + L_2\n y3 = (W_upstream - W_downstream)/2\n x4 = L_1 + L_2 + L_3\n y4 = y3\n x5 = x4\n y5 = y4 + W_downstream\n x6 = L_1 + L_2\n y6 = y5\n x7 = L_1\n y7 = W_upstream\n x8 = 0\n y8 = W_upstream\n a1 = 0\n a2 = (W_upstream - W_downstream)/(2*L_2)\n a3 = 1\n a4 = (W_downstream - W_upstream)/(L_2*W_upstream)\n\n # Dictionary of vertex objects\n vertices = {}\n points = []\n\n for i in range(m+1):\n x = deltax*i\n for j in range(n+1):\n y = deltay*j\n if x > L_1 and x <= (L_1 + L_2):\n y = a1 + a2*(x - L_1) + a3*y + a4*(x - L_1)*y\n elif x > L_1 + L_2:\n y = (W_upstream - W_downstream)/2 + deltay*j*W_downstream/W_upstream\n\n vertices[i,j] = len(points)\n points.append([x + origin[0], y + origin[1]])\n\n # Construct 4 triangles per element\n elements = []\n boundary = {}\n for i in range(m):\n for j in range(n):\n v1 = vertices[i,j+1]\n v2 = vertices[i,j]\n v3 = vertices[i+1,j+1]\n v4 = vertices[i+1,j]\n x = (points[v1][0]+points[v2][0]+points[v3][0]+points[v4][0])*0.25\n y = (points[v1][1]+points[v2][1]+points[v3][1]+points[v4][1])*0.25\n v5 = len(points)\n points.append([x, y])\n\n #Create left triangle\n if i == 0:\n boundary[(len(elements), 1)] = 'left'\n elements.append([v2,v5,v1])\n\n #Create bottom triangle\n if j == 0:\n boundary[(len(elements), 1)] = 'bottom'\n elements.append([v4,v5,v2])\n\n #Create right triangle\n if i == m-1:\n boundary[(len(elements), 1)] = 'right'\n elements.append([v3,v5,v4])\n\n #Create top triangle\n if j == n-1:\n boundary[(len(elements), 1)] = 'top'\n elements.append([v1,v5,v3])\n\n\n return points, elements, boundary",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population"
] |
[
"0.62787056",
"0.5401195",
"0.536973",
"0.53535783",
"0.53305084",
"0.52989906",
"0.5220156",
"0.5086378",
"0.5010793",
"0.50038093",
"0.5000887",
"0.497259",
"0.49526855",
"0.4950322",
"0.49460703",
"0.4939531",
"0.49298885",
"0.49173522",
"0.49152607",
"0.4909205",
"0.49014136",
"0.48996878",
"0.48962954",
"0.4870206",
"0.4866886",
"0.48663187",
"0.48530194",
"0.48461345",
"0.4844199",
"0.48169085"
] |
0.67585707
|
0
|
Function that uses the Pythagorean theorem to calculate the distance between the found point and the known location. NB: this function is not used in the main program, so it must be called explicitly.
|
def distance(known_loc, found_loc, N_vars):
    # Pythagorean/Euclidean distance between the known location and the found point.
    undersqrt = np.zeros(N_vars)
    for i in range(N_vars):
        undersqrt[i] = (known_loc[i] - found_loc[i])**2
    dist = np.sqrt(np.sum(undersqrt))
    return dist
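# Minimal usage sketch (illustrative, not part of the original snippet); assumes
# `np` is NumPy and both locations are length-N_vars coordinate sequences.
# Example: distance(np.array([0.0, 0.0]), np.array([3.0, 4.0]), 2) returns 5.0.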
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance",
"def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm",
"def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm",
"def distance(self, pt):\n return math.sqrt((self.x - pt.x) ** 2 + (self.y - pt.y) ** 2)",
"def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)",
"def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)",
"def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))",
"def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)",
"def distance_to(self, location):\r\n return gislib.getDistance((self.latitude, self.longitude), location)",
"def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0",
"def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)",
"def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d",
"def _calc_distance(self, checkpoint_loc):\n return N.sqrt((self.current_location[1] - checkpoint_loc[1])**2 \\\n + (self.current_location[0] - checkpoint_loc[0])**2)",
"def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))",
"def distance(self, other_pt, is_lla=True):\n return 0.0",
"def distance_to_location(self, row, col):\n return float(sqrt(pow(self._row - row, 2) + pow(self._col - col, 2)))",
"def DISTANCE(x,y,x2=0,y2=0):\n\treturn sqrt((x-x2)*(x-x2)+(y-y2)*(y-y2))",
"def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5",
"def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d",
"def squaredDistanceTo(self,other):\n if not isinstance(other,Point):\n return \n return (self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2",
"def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )",
"def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2",
"def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01",
"def get_location_distance(game, player, opp_player):\n player_location = game.get_player_location(player)\n opp_location = game.get_player_location(opp_player)\n x_distance = math.pow(player_location[0] - opp_location[0], 2)\n y_distance = math.pow(player_location[1] - opp_location[1], 2)\n return math.sqrt(x_distance + y_distance)",
"def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))",
"def euclideanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return math.sqrt((loc1[1]-loc2[1])**2+(loc1[0]-loc2[0])**2)\n # END_YOUR_CODE",
"def compute_distance(fisrt_point: tuple, second_point: tuple) -> float:\n return sqrt(int(fisrt_point[0] - second_point[0])**2 + int(fisrt_point[1] - second_point[1])**2)",
"def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))"
] |
[
"0.67001003",
"0.67001003",
"0.668429",
"0.66520405",
"0.66520405",
"0.66172695",
"0.6616293",
"0.66006625",
"0.65998554",
"0.65997404",
"0.65976804",
"0.6571387",
"0.6536612",
"0.6525614",
"0.6512911",
"0.65055317",
"0.64955235",
"0.6490895",
"0.6487881",
"0.6487362",
"0.64849156",
"0.6483986",
"0.64823765",
"0.6480024",
"0.6474494",
"0.64713705",
"0.6427",
"0.64117986",
"0.6408971",
"0.6407271"
] |
0.6931908
|
0
|
Prints the accuracy of a model in a nice format.
|
def print_accuracy(acc: float, model_name: str) -> None:
    print(f"accuracy of {model_name} = {round(100 * acc, 2)}%")
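# Usage sketch (illustrative values): print_accuracy(0.9567, "logistic regression")
# prints "accuracy of logistic regression = 95.67%".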
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)",
"def print_model_analysis(predictions, targets, print_conf_matrix=False):\n accuracy = accuracy_per_shift(predictions, targets)\n built_in_accuracy = accuracy_score(targets, predictions)\n\n conf_matrix = custom_confusion_matrix(predictions, targets)\n\n precision, recall = precision_recall(conf_matrix)\n\n print('Accuracy:', accuracy)\n print('Built-in accuracy:', built_in_accuracy)\n if print_conf_matrix:\n print('Confusion Matrix:\\n', conf_matrix)\n print('Precision:', precision)\n print('Recall:', recall)\n\n print()",
"def acc_print(label, acc):\n for i in range(len(label)):\n print(\"Accuracy of \", label[i], \" is \", np.round(acc[i], 5))",
"def print_score(classifier,X_test,y_test):\n print(\"Test results:\\n\")\n print('Accuracy Score: {0:.4f}\\n'.format(accuracy_score(y_test,classifier.predict(X_test))))\n print('Classification Report:\\n{}\\n'.format(classification_report(y_test,classifier.predict(X_test))))\n print('Confusion Matrix:\\n{}\\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))",
"def print_evaluations(ytrue, ypred, model):\n\n print(f'How does model {model} score:')\n print(f'The accuracy of the model is: {round(accuracy_score(ytrue, ypred), 3)}')\n print(f'The precision of the model is: {round(precision_score(ytrue, ypred, pos_label=\"bastille_\" ), 3)}')\n print(f'The recall of the model is: {round(recall_score(ytrue, ypred, pos_label=\"bastille_\"), 3)}')\n print(f'The f1-score of the model is: {round(f1_score(ytrue, ypred, pos_label=\"bastille_\"), 3)}')",
"def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')",
"def print_acc(class_matrix):\n total = 0\n num_index = len(class_matrix)\n for i in range(num_index):\n total += class_matrix[i][i]\n print(\"Accuracy: {0}%\".format(100 * total/np.sum(class_matrix)))",
"def show_accuracy(ground_truth, predicted):\n acc_score = accuracy_score(ground_truth, predicted)\n prec_score = precision_score(ground_truth, predicted, average='weighted')\n recl_score = recall_score(ground_truth, predicted, average='weighted')\n print('--- Accuracy: ', acc_score)\n print('--- Precision: ', prec_score)\n print('--- Recall: ', recl_score)\n print('--- Classification report: ')\n print(precision_recall_fscore_support(ground_truth, predicted))\n print(classification_report(ground_truth, predicted))\n return acc_score, prec_score, recl_score",
"def print_network(self):\n #plot_model(self.model, to_file='model.png', show_shapes=True)\n logging.info(\"\")\n logging.info(self.network)\n logging.info(\"Network accuracy: %.2f%%\" % (self.accuracy * 100))\n logging.info(\"Network loss: %.2f%%\" % (self.loss))",
"def report_accuracy(\r\n predictions: pd.DataFrame, \r\n test_y: pd.DataFrame\r\n) -> None:\r\n # Calculate accuracy of predictions\r\n accuracy = (predictions == test_y).mean()\r\n \r\n # Log the accuracy of the model\r\n log = logging.getLogger(__name__)\r\n log.info(\"Model accuracy on test set: %0.2f%%\", accuracy * 100)\r\n\r\n mlflow.log_metric(\"accuracy\", accuracy)\r\n mlflow.set_tag(\"Model Version\", 1)",
"def example():\n accShape, accTexture, accFinal = train_and_eval_all_models()\n print(f'Accuracy -- only shape: {accShape}; only texture: {accTexture}; combined: {accFinal}.')",
"def get_overall_accuracy(self):\n\n self.overall_accuracy = np.sum(Model.accuracies)/len(Model.accuracies)\n\n #saving the accuracies\n if( not os.path.isfile(\"logs.txt\") ):\n with open(\"logs.txt\", 'a') as f:\n f.write(\"States, U_0, U_1, U_2, U_3, U_4, U_5, U_6, U_7,\" + \\\n \" U_8, U_9, accuracy\\n\")\n\n with open(\"logs.txt\", 'a+') as f:\n f.write(f\" {self.n_components} \" )\n for i in Model.accuracies:\n f.write(f\"{i} \" )\n f.write(f\"{self.overall_accuracy}\\n\")\n\n return self.overall_accuracy",
"def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))",
"def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)",
"def accuracy(self):",
"def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc",
"def test_print_results(self):\n calculated = super().predict_and_print()\n self.assertEqual(calculated, EXP_PRINT_OUTPUT_BASE.format(.18, .1, 0.186, self.test_model.model.train_time) +\n \"Max tree max_depth: 1\\n\"\n \"Number of n_estimators: 1\\n\"\n \"Impurity method: entropy\\n\")",
"def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)",
"def printAccuracies(predictions, expected):\n\tmatches = Utilities.matchDictionaries(predictions, expected)\n\ttotal = float(len(predictions))\n\taccuracy = matches/total\n\n\tprint \"Matches: \", matches\n\tprint \"Total: \", total\n\tprint \"Accuracy: \", accuracy",
"def print_report(\n m, X_valid, y_valid, t=0.5, X_train=None, y_train=None, show_output=True\n):\n # X_train = X_train.values\n # X_valid = X_valid.values\n\n if isinstance(m, list):\n probs_valid = predict_ensemble(m, X_valid)\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = predict_ensemble(m, X_train)\n y_train_pred = adjusted_classes(probs_train, t)\n else:\n probs_valid = m.predict_proba(X_valid)[:, 1]\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = m.predict_proba(X_train)[:, 1]\n y_train_pred = adjusted_classes(probs_train, t)\n\n res = [\n roc_auc_score(y_valid, probs_valid),\n f1_score(y_valid, y_val_pred),\n confusion_matrix(y_valid, y_val_pred),\n ]\n result = f\"AUC valid: {res[0]} \\nF1 valid: {res[1]}\"\n\n if X_train is not None:\n res += [\n roc_auc_score(y_train, probs_train),\n f1_score(y_train, y_train_pred),\n ]\n result += f\"\\nAUC train: {res[3]} \\nF1 train: {res[4]}\"\n\n acc_train = m.score(X_train, y_train)\n acc_valid = m.score(X_valid, y_valid)\n\n if show_output:\n logging.info(f\"train acc: {acc_train}\")\n logging.info(f\"test acc: {acc_valid} \")\n\n logging.info(result)\n plot_confusion_matrix(\n m, X_valid, y_valid, display_labels=y_valid.unique()\n )\n logging.info(classification_report(y_valid, y_val_pred))\n plt.show()\n return {\n \"train\": {\"AUC\": res[3], \"F1\": res[4], \"acc\": acc_train},\n \"test\": {\"AUC\": res[0], \"F1\": res[1], \"acc\": acc_valid},\n }",
"def print_accuracy(precision, recall, f1score, accuracy, nb_class, class_name=None, confusion_matrix=None):\n class_name = class_name if class_name is not None else [str(elm) for elm in range(nb_class)]\n class_name = [\"Average\"] + class_name\n col_padding = [15] + [max(9, len(elm)) for elm in class_name]\n line = [\n \"\".ljust(col_padding[0], \" \"),\n \"Precision\".ljust(col_padding[0], \" \"),\n \"Recall\".ljust(col_padding[0], \" \"),\n \"F1score\".ljust(col_padding[0], \" \")\n ]\n for i in range(len(class_name)):\n line[0] += class_name[i].ljust(col_padding[i + 1], \" \")\n line[1] += \"{}%\".format(str(round(precision[i] * 100, 2))).ljust(col_padding[i + 1], \" \")\n line[2] += \"{}%\".format(str(round(recall[i] * 100, 2))).ljust(col_padding[i + 1], \" \")\n line[3] += \"{}%\".format(str(round(f1score[i] * 100, 2))).ljust(col_padding[i + 1], \" \")\n print(\"\\n\".join(line))\n print(\"{title:<{width1}}{val:<{width2}}\".format(\n title=\"Accuracy\", width1=col_padding[0], val=str(round(accuracy * 100, 2)) + \"%\", width2=col_padding[1]))\n if confusion_matrix is not None:\n print(\"Confusion matrix:\\n{}\".format(confusion_matrix))",
"def report_accurarcy(model, svc, loader, device=torch.device('cpu')):\n embeddings, labels = get_embeddings(model, loader.dataset, device)\n\n predictions = [svc.predict(e) for e in embeddings]\n ground_truths = [np.argmax(l, axis=-1) for l in labels]\n\n cm = confusion_matrix(ground_truths, predictions)\n acc = accuracy_score(ground_truths, predictions)\n return acc, cm",
"def accuracy(self):\n\t\treturn self.accuracy_",
"def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)",
"def show_precision(self):\r\n return round(f1_score(self.actual, self.predicted),2)",
"def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")",
"def accuracy_info(self):\n return self.bayes_accuracy",
"def print_summary(self):\n self.model.summary()",
"def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'",
"def testAccuracy(self):\n \n loader = torch.utils.data.DataLoader(dataset=self.test, \n shuffle=False)\n acc = accuracy(self.model, loader)\n self.assertEqual(acc, 1.0)\n print(acc)"
] |
[
"0.7742356",
"0.71960884",
"0.70556444",
"0.7024729",
"0.69513243",
"0.6905513",
"0.6895574",
"0.6863374",
"0.6848317",
"0.6816727",
"0.67776877",
"0.67469156",
"0.6731067",
"0.66809744",
"0.66269875",
"0.65719056",
"0.65452284",
"0.6532581",
"0.6512558",
"0.646412",
"0.6449968",
"0.6442765",
"0.6417615",
"0.64080435",
"0.6373252",
"0.6338265",
"0.63230604",
"0.631127",
"0.6295997",
"0.62920976"
] |
0.81111825
|
0
|
Checks whether the specified 'childid' halo is a subhalo of 'parentid' halo.
|
def is_subhalo(self, childid, parentid):
    return childid in self._halos[parentid].properties['children']
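# Usage sketch (illustrative ids, assumed halo catalogue object `h`):
# h.is_subhalo(5, 1) is True only when halo id 5 appears in
# h._halos[1].properties['children'].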
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _is_child(self, parent, child): # type: (str, str) -> bool\n return child != parent and child.startswith(parent + \".\")",
"def is_subpath_of(parent, child):\n # Based on https://stackoverflow.com/a/37095733 .\n\n # In Python 3.9, the `Path.is_relative_to()` method will supplant this, so\n # we can stop using crusty old os.path functions.\n parent_realpath = os.path.realpath(parent)\n child_realpath = os.path.realpath(child)\n return os.path.commonpath([parent_realpath, child_realpath]) == parent_realpath",
"def is_state_a_child(child: State, parent: State) -> bool:\n if child.x >= parent.x and child.y >= parent.y and child.x <= parent.x + parent.width and child.y<=parent.y+parent.height:\n return True\n return False",
"def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()",
"def is_subhalo(self, otherhalo):\n\n return self._halo_catalogue.is_subhalo(self._halo_id, otherhalo._halo_id)",
"def isAncestorOf(ancestor, child):\n\twhile child is not None:\n\t\tif child is ancestor:\n\t\t\treturn True\n\t\tchild = child.parent()\n\treturn False",
"def is_sub(parent, path):\n parent = canonical_path(parent, resolve_link=False)\n path = canonical_path(path, resolve_link=False)\n return os.path.commonprefix([parent, path]) == parent",
"def isSubDir(parent, child):\n pParts = pathComponents(os.path.abspath(parent))\n cParts = pathComponents(os.path.abspath(child))\n if len(pParts) < len(cParts):\n return cParts[:len(pParts)] == pParts",
"def _is_hierachy_searchable(child_id: str) -> bool:\n pieces_of_child_id_list = child_id.split('.')\n suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1]\n return suffix.isnumeric()",
"def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False",
"def is_subdir(suspect_child, suspect_parent):\n suspect_child = os.path.realpath(suspect_child)\n suspect_parent = os.path.realpath(suspect_parent)\n relative = os.path.relpath(suspect_child, start=suspect_parent)\n return not relative.startswith(os.pardir)",
"def circular_checker(parent, child):\n if parent == child:\n raise ValidationError('Self links are not allowed.')\n\n if child.pk in parent.get_ancestor_pks():\n raise ValidationError('The object is an ancestor.')",
"def _check_parentid_permission_container(syn, parentid):\n if parentid is not None:\n try:\n syn_ent = syn.get(parentid, downloadFile=False)\n # If not container, throw an assertion\n assert synapseclient.entity.is_container(syn_ent)\n except (SynapseHTTPError, AssertionError):\n raise ValueError(\n \"Provided Synapse id must be your input folder Synapse id \"\n \"or a Synapse Id of a folder inside your input directory\"\n )",
"def is_child_of(self, *args):\n return _ida_hexrays.cexpr_t_is_child_of(self, *args)",
"def create_subdir_check ( parent, fs_sep=os.sep ):\n PARENT_PATH = parent.rstrip ( fs_sep ).split ( fs_sep )\n\n def is_subdir ( dirpath,\n _path_el=PARENT_PATH, _path_len=len( PARENT_PATH ), _fs_sep=fs_sep\n ):\n \"\"\"Returns True if the given filesystem path is a subpath of the\n (predefined) parent path, else False.\n\n arguments:\n * dirpath -- filesystem path to be checked\n * _path_el -- local variable containing information about the parent\n path. Shouldn't be set manually.\n * _path_len -- local variable containing information about the length\n of the parent path. Shouldn't be set manually.\n * _fs_sep -- local variable that is a copy of fs_sep.\n Shouldn't be set manually.\n \"\"\"\n dirpath_el = dirpath.rstrip ( _fs_sep ).split ( _fs_sep )\n if len ( dirpath_el ) < _path_len:\n return False\n else:\n return all (\n this == expect for this, expect in zip ( dirpath_el, _path_el )\n )\n # --- end of is_subdir (...) ---\n\n return is_subdir",
"def has_parent(parent, parent_id, children):\n args = get_args(request.args)\n if request.method == 'GET':\n #Something like /api/domains/<id>/virtualmachines will be equivalent to listVirtualMachines?domainid=<id>\n verb = \"list\"\n subject = children\n #If parent is 'domains' it is added into args as domainid, i.e singular[domains] + 'id'\n args[singular[parent] + 'id'] = parent_id\n return apicall(verb, subject, args)",
"def haschild(self, child):\n return pbxhelper.pbxobj_has_pbxlist_value(self, u'pbx_children', child, \\\n self.is_valid_child)",
"def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False",
"def has_parent(obj, parent_name):\n if obj.parent is None:\n return False\n if obj.parent.name is None:\n return False\n elif obj.parent.name == parent_name:\n return True\n else:\n return has_parent(obj.parent, parent_name)",
"def is_valid_child(self, child):\n return isinstance(child, baseobject.PBXBaseObject) \\\n and child.isa in self.allow_children_types()",
"def IsDescendantOf(self, parent, item):\r\n\r\n while item:\r\n \r\n if item == parent:\r\n \r\n # item is a descendant of parent\r\n return True\r\n \r\n item = item.GetParent()\r\n \r\n return False",
"def is_state_a_child_by_coord(x, y, width, height, parent: State) -> bool:\n if x+1 >= parent.x and y+1 >= parent.y and x + width - 1 <= parent.x + parent.width:\n if y + height - 1 <= parent.y + parent.height:\n return True\n return False",
"def is_child(self, kid, mother): \n mom_node = self.names_to_nodes[mother] \n child_node = self.names_to_nodes[kid]\n return mom_node.is_child(child_node)",
"def is_known(self, child):\r\n return child in self._parents",
"def is_child_graph(self, child_graph):\n # pylint: disable=protected-access\n if not child_graph or not child_graph._parent_graph:\n return False\n if child_graph._parent_graph == self:\n return True\n return self.is_child_graph(child_graph._parent_graph)\n # pylint: enable=protected-access",
"def is_ancestor(parent_alphabet, child_alphabet):\r\n alphabet = parent_alphabet\r\n while alphabet:\r\n if child_alphabet == alphabet:\r\n return True\r\n alphabet = alphabet.alphabet\r\n return False",
"def is_child_of_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.is_child_of_bin\n return self._get_provider_session('catalog_hierarchy_session').is_child_of_catalog(*args, **kwargs)",
"def has_child(self):\n return False",
"def can_add_child(self, child):\n if not self.is_valid_child(child):\n return False\n if child.isa == u'PBXGroup':\n return len(func.take(\\\n lambda c: c.pbx_name == child.pbx_name and c.realpath() == child.realpath(),\\\n self.pbx_children)) == 0\n else:\n return len(func.take(lambda c:c.realpath() == child.realpath(), self.pbx_children)) == 0",
"def contains_child(self, pid):\n return pid in self._children_ids"
] |
[
"0.7011642",
"0.6519133",
"0.6421444",
"0.6203408",
"0.61802125",
"0.6062339",
"0.604538",
"0.60408974",
"0.60153866",
"0.5923618",
"0.59223753",
"0.58900964",
"0.5889205",
"0.5869861",
"0.58656186",
"0.58480877",
"0.5774682",
"0.57726324",
"0.5757294",
"0.57391155",
"0.57132775",
"0.56612563",
"0.5619498",
"0.5604111",
"0.55758035",
"0.5569924",
"0.547632",
"0.5468012",
"0.54617774",
"0.5453713"
] |
0.8721258
|
0
|
Creates a 'grp' array which labels each particle according to its parent halo.
|
def make_grp(self):
    # Make sure the base snapshot has a 'grp' array to write into.
    try:
        self.base['grp']
    except:
        self.base['grp'] = np.zeros(len(self.base), dtype='i')
    # Label every particle in each halo with its parent halo's id.
    for halo in self._halos.values():
        halo['grp'][:] = halo._halo_id
    if config['verbose']: print "writing %s"%(self._base().filename+'.grp')
    self._base().write_array('grp', overwrite=True, binary=False)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getGroups(self):\n groups_ = {'black': [], 'white': []}\n for color, stones in self.stones.items():\n if not stones: continue\n # (group_labels) is a parallel array to (stones). Where each value is an\n # int and each int value represents a group. Examples:\n # [1, 1] = 1 group: 1 group of 2 stones\n # [1, 1, 2] = 2 groups: 1 group of 2 stones and 1 group of 1 stone\n # [1, 1, 2, 3] = 3 groups: 1 group of 2 stones, 1 group of 1 stone, and 1 group of 1 stone\n group_labels = [0] * len(stones)\n\n new_label = 1\n for i, stone in enumerate(stones):\n # Assign new label to stone, if stone has yet to be labelled.\n if group_labels[i] == 0:\n group_labels[i] = new_label\n new_label += 1\n # Inner loop compares outer loop (stone) with all other (stones).\n for other_i, other_stone in enumerate(stones):\n if i == other_i: continue\n if stone.isNeighbor(other_stone):\n # If inner loop stone has yet to be labelled, then inner loop stone is\n # labelled with outer loop stones label.\n if group_labels[other_i] == 0:\n group_labels[other_i] = group_labels[i]\n # If inner loop stone has already been labelled, then all stones previously\n # labelled with outer loop stone's label, get their labels reassigned to the\n # inner loop stone's label.\n else:\n new_labels = []\n for ga in group_labels:\n if ga == group_labels[i]: new_labels += [ group_labels[other_i] ]\n else: new_labels += [ ga ]\n group_labels = new_labels\n # (groups_) are created now that (group_labels) has been generated.\n for master_label in range(max(group_labels)):\n master_label += 1\n stones_to_group = []\n for i, label in enumerate(group_labels):\n if master_label == label:\n stones_to_group += [ self.stones[color][i] ]\n groups_[color] += [ Group(self, stones_to_group) ]\n return groups_",
"def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child",
"def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()",
"def make_grp(self, name='grp', v=False):\n self.base[name] = self.get_group_array(v=v) #np.zeros(len(self.base), dtype=int)#self.get_group_array()",
"def pgroup(pynodes, world = False, re = \"\", suffix = \"\"):\n # Initiate return variable\n output = []\n # Filter supplied pynodes, if equal to 0 then return false\n if len(pynodes) == 0:\n return output\n # Group created on each object transformation\n if not world:\n for o in pynodes:\n # Name var\n the_name = o.name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create group for each specified PyNode\n grp = pm.group(empty = True, name = the_name)\n # Align the pgroup to each PyNode transformation\n transformation.align(grp, o, mode = 'transform')\n # Get object parent\n parent = o.getParent()\n # If the object have parent,\n # Parent the group to object parent\n if parent:\n grp.setParent(parent)\n # Parent the object to pgroup\n o.setParent(grp)\n # Collect group to output\n output.append(grp)\n\n else:\n # Name var\n the_name = pynodes[0].name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create single group\n grp = pm.group(empty = True, name = the_name)\n # Collect group to output\n output.append(grp)\n # Parent all specified PyNodes to pgroup\n pm.parent(pynodes, grp)\n\n return output",
"def _createdOrderedGroups(self):\n self.groups = []\n for _i in xrange(self.layers):\n self.groups.append(pyglet.graphics.OrderedGroup(_i))\n # Create one top level group. Useful for dialog boxes and other stuff\n # that goes over everything else.\n self.top_group1 = pyglet.graphics.OrderedGroup(99)\n self.top_group2 = pyglet.graphics.OrderedGroup(9999)",
"def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])",
"def assignGroupIDs(self):\n components = self.getComponents(graph_dictionary=self.graph_dict)\n self._gIDs = np.zeros(self.no_plants, dtype='object')\n for i in components.keys():\n self._gIDs[components[i]] = 'gID_' + str(i)",
"def get_group_array(self, top_level=False, family=None):\n\n target = None\n famslice = None\n\n if family is None:\n target = self.base\n else:\n if family in ['gas','star','dm']:\n famslice = self.base._get_family_slice(family)\n target = self.base[famslice]\n else:\n if family == 'bh':\n temptarget = self.base.star\n target = temptarget[(temptarget['tform']<0)]\n\n if target is None:\n raise ValueError(\"Family value given is not valid. Use 'gas', 'star', 'dm', or 'bh'\")\n\n if self._dosort is None:\n #if we want to differentiate between top and bottom levels,\n #the halos do need to be in order regardless if dosort is on.\n nparr = np.array([self._halos[i+1].properties['npart'] for i in range(self._nhalos)])\n osort = np.argsort(nparr)[::-1]\n self._sorted_indices = osort + 1\n hcnt = self._sorted_indices\n\n else:\n hcnt = np.arange(len(self._sorted_indices)) + 1\n\n if top_level is False:\n hord = self._sorted_indices\n else:\n hord = self._sorted_indices[::-1]\n hcnt = hcnt[::-1]\n\n if self._all_parts is None:\n f = util.open_(self._ahfBasename+'particles')\n\n cnt = 0\n ar = np.empty(len(target),dtype=np.int32)\n ar[:]=-1\n for i in hord:\n halo = self._halos[i]\n if self._all_parts is not None:\n ids = halo.get_index_list(self.base)\n else:\n f.seek(halo.properties['fstart'],0)\n ids = self._load_ahf_particle_block(f,halo.properties['npart'])\n if family is None:\n ar[ids] = hcnt[cnt]\n else:\n if famslice:\n t_mask = (ids >= famslice.start) & (ids < famslice.stop)\n id_t = ids[np.where(t_mask)] - famslice.start\n else:\n fpos_ar = target.get_index_list(self.base)\n id_t, = np.where(np.in1d(fpos_ar, ids))\n\n ar[id_t] = hcnt[cnt]\n cnt += 1\n return ar",
"def make_protein_group(self):\r\n prot_names = [\r\n 'Ala', 'Arg', 'Asn', 'Asp', 'Cys', 'Gln', 'Glu',\r\n 'Gly', 'His', 'Ile', 'Leu', 'Lys', 'Met', 'Phe',\r\n 'Pro', 'Ser', 'Thr', 'Trp', 'Tyr', 'Val'\r\n ]\r\n self.__make_group_by_res('Protein', prot_names)",
"def get_group_label(i):\n if i//4 == 0:\n return \"buildUpPlay\"\n elif i//4 == 1:\n return \"chanceCreation\"\n elif i//4 == 2:\n return \"defence\"",
"def _create_child_group(self, name) -> \"GroupBase\":\n pass",
"def get_group_array(self):\n raise NotImplementedError",
"def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups",
"def _generateNamedContainingPanel(self, obj, **args):\n result = []\n parent = obj.parent\n while parent and (parent.parent != parent):\n if parent.getRole() == pyatspi.ROLE_PANEL:\n label = self._generateLabelAndName(parent)\n if label:\n result.extend(label)\n break\n parent = parent.parent\n return result",
"def Group(self) -> _n_5_t_0:",
"def Group(self) -> _n_5_t_0:",
"def get_grp(self):\n\n grp = -1\n\n if self.depth > 2:\n\n inp = ri.RhinoInput(self.path[2])\n\n grp = inp.get_no()\n\n return grp",
"def generate_parent_space(self):\n\n parent_node = self.node+'_ZERO'\n parent_node = mc.ls(parent_node)\n\n if not parent_node:\n parent_node = utils.get_parent(self.const_node)\n\n parent_node = mc.ls(parent_node)\n\n if parent_node:\n return ['parent', parent_node[0]]\n\n else:\n return []",
"def get_srv_ppgrp_name(self):\n pp_grp_name_lst = list()\n for srv_grp in self.srv_grp_lst:\n pp_grp = list()\n for srv in srv_grp:\n pp_grp.append(\n (srv['name'] + '_pt_in', srv['name'] + '_pt_out'))\n pp_grp_name_lst.append(pp_grp)\n return pp_grp_name_lst",
"def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )",
"def create_lod_groups(lod_dict, name='LOD_grp'):\n geo_lod_list = list()\n cmds.select(clear=True)\n for lod, lod_geo in lod_dict.items():\n for geo in lod_geo:\n if cmds.listRelatives(geo, parent=True):\n cmds.parent(geo, world=True)\n geo_lod_list.append(lod_geo[0])\n\n if geo_lod_list:\n cmds.select(geo_lod_list)\n cmds.LevelOfDetailGroup()\n lod_grp = cmds.rename(cmds.ls(selection=True)[0], name)\n for lod, lod_geo in lod_dict.items():\n if len(lod_geo) > 1:\n lod_parent = cmds.listRelatives(lod_geo[0], parent=True)\n if lod_parent:\n cmds.parent(lod_geo[1:], lod_parent[0])\n return lod_grp",
"def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')",
"def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])",
"def generate_patch_labels(batch_size : int, patch_shape : int, label : int = 1) -> np.ndarray:\n labels = np.full(shape=(batch_size, patch_shape, patch_shape, 1), fill_value=label)\n return labels",
"def set_groupname(diagram, p, g):\n if diagram.startswith('C2'):\n groupname = diagram + '_uu_p%1i%1i%1i.d000.g%i' % \\\n (p[0][0], p[0][1], p[0][2], g[0][0]) \\\n + '_p%1i%1i%1i.d000.g%i' % (p[1][0], p[1][1], p[1][2], g[1][0])\n elif diagram.startswith('C3'):\n groupname = diagram + '_uuu_p%1i%1i%1i.d000.g5' % \\\n (p[0][0][0], p[0][0][1], p[0][0][2]) \\\n + '_p%1i%1i%1i.d000.g%1i' % \\\n (p[1][0], p[1][1], p[1][2], g[1][0]) \\\n + '_p%1i%1i%1i.d000.g5' % (p[0][1][0], p[0][1][1], p[0][1][2])\n elif diagram == 'C4+D' or diagram == 'C4+C':\n groupname = diagram + '_uuuu_p%1i%1i%1i.d000.g5' % (p[0][0][0], p[0][0][1], p[0][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][0][0], p[1][0][1], p[1][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[0][1][0], p[0][1][1], p[0][1][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][1][0], p[1][1][1], p[1][1][2])\n elif diagram == 'C4+B':\n groupname = diagram + '_uuuu_p%1i%1i%1i.d000.g5' % (p[0][0][0], p[0][0][1], p[0][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][0][0], p[1][0][1], p[1][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][1][0], p[1][1][1], p[1][1][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[0][1][0], p[0][1][1], p[0][1][2]) \n\n else:\n print 'in set_groupname: diagram unknown! Quantum numbers corrupted.'\n return\n\n return groupname",
"def splitVRdata(partdata, halodata, pids_halos, pids, coords, vels, nhalo, nsubhalo):\n\n\t# Arrays to hold different subsets of FOF group particles\n\tpids_background = np.array(pids_halos[:nhalo], dtype = 'object')\n\tcoords_background = np.empty(nhalo, dtype = 'object')\n\tvels_background = np.empty(nhalo, dtype = 'object')\n\tpids_sub = np.array(pids_halos[nhalo:], dtype = 'object')\n\tcoords_sub = np.empty(nsubhalo, dtype = 'object')\n\tvels_sub = np.empty(nsubhalo, dtype = 'object')\n\n\t# Create analogues to pids_halos (i.e. array where each\n\t# entry is the coordinates and velocities for each particle\n\t# in that (sub)halo)\n\tpids_all = np.concatenate(pids_halos)\n\tpid_idx = np.argsort(pids)\n\tpid_sorted = pids[pid_idx]\n\tmatch_idx = np.searchsorted(pid_sorted, pids_all)\n\tidxs = pid_idx[match_idx]\n\tcoords_halos = coords[idxs]\n\tvels_halos = vels[idxs]\n\n\t# Indices that mark the first and last particle in each (sub)halo\n\tlinds = partdata['Offset'] + partdata['Offset_unbound']\n\tuinds = linds + partdata['Npart']\n\n\t# (Field) haloes\n\tfor ihalo in range(nhalo):\n\t\tcoords_background[ihalo] = np.array([coord for coord in coords_halos[linds[ihalo]:uinds[ihalo]]])\n\t\tvels_background[ihalo] = np.array([vel for vel in vels_halos[linds[ihalo]:uinds[ihalo]]])\n\n\t# Subhaloes\n\tfor isub in range(nhalo, nhalo + nsubhalo):\n\t\tidx = isub - nhalo\n\t\tcoords_sub[idx] = np.array([coord for coord in coords_halos[linds[isub]:uinds[isub]]])\n\t\tvels_sub[idx] = np.array([vel for vel in vels_halos[linds[isub]:uinds[isub]]])\n\n\t# Get PIDs of all subhaloes hosted by each field halo to create\n\t# arrays containing ALL particles in the FOF group\n\thostHaloID = halodata['hostHaloID']\n\tpids_fof = np.empty(nhalo, dtype = 'object')\n\tcoords_fof = np.empty(nhalo, dtype = 'object')\n\tvels_fof = np.empty(nhalo, dtype = 'object')\n\tfor ihalo in range(nhalo):\n\t\tsubs = np.where(hostHaloID == ihalo + 1)[0] - nhalo\n\t\tif subs.size > 0:\n\t\t\t#print(pids_sub[subs])\n\t\t\t#print(np.concatenate(pids_sub[subs]))\n\t\t\tpids_fof[ihalo] = np.concatenate((pids_background[ihalo], np.concatenate(pids_sub[subs])))\n\t\t\tcoords_fof[ihalo] = np.concatenate((coords_background[ihalo], np.concatenate(coords_sub[subs])))\n\t\t\tvels_fof[ihalo] = np.concatenate((vels_background[ihalo], np.concatenate(vels_sub[subs])))\n\t\telse: # This halo hosts no subhaloes\n\t\t\tpids_fof[ihalo] = pids_background[ihalo]\n\t\t\tcoords_fof[ihalo] = coords_background[ihalo]\n\t\t\tvels_fof[ihalo] = vels_background[ihalo]\n\n\t# Construct FOF components dictionary\n\tfofdata = {}\n\tfofdata['FOF/PIDs'] = pids_fof\n\tfofdata['FOF/Coordinates'] = coords_fof\n\tfofdata['FOF/Velocities'] = vels_fof\n\tfofdata['Background/PIDs'] = pids_background\n\tfofdata['Background/Coordinates'] = coords_background\n\tfofdata['Background/Velocities'] = vels_background\n\tfofdata['Satellite/PIDs'] = pids_sub\n\tfofdata['Satellite/Coordinates'] = coords_sub\n\tfofdata['Satellite/Velocities'] = vels_sub\n\n\treturn fofdata",
"def divisor_subgroups(self):\n return [Gamma0_constructor(M) for M in self.level().divisors()]",
"def getHierarchies():",
"def getHierarchies():"
] |
[
"0.60749334",
"0.6051593",
"0.60361016",
"0.59773016",
"0.58477736",
"0.57438767",
"0.5581628",
"0.55605465",
"0.553918",
"0.5472934",
"0.5423763",
"0.53898454",
"0.5385271",
"0.5360189",
"0.5349044",
"0.5325815",
"0.5325815",
"0.53235996",
"0.53167826",
"0.531357",
"0.5309603",
"0.5269836",
"0.52340156",
"0.51393706",
"0.51054597",
"0.5097366",
"0.50937057",
"0.5066123",
"0.5053629",
"0.5053629"
] |
0.70059013
|
0
|
Creates a 'children' array inside each halo's 'properties' listing the halo IDs of its children. Used in case the reading of substructure data from the AHF-supplied _substructure file fails for some reason.
|
def _setup_children(self):
for i in xrange(self._nhalos):
self._halos[i+1].properties['children'] = []
for i in xrange(self._nhalos):
host = self._halos[i+1].properties.get('hostHalo', -2)
if host > -1:
try:
self._halos[host+1].properties['children'].append(i+1)
except KeyError:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i + 1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i + 1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host + 1].properties['children'].append(i + 1)\n except KeyError:\n pass",
"def generate_children(self):\n\n if self.children is not None:\n return\n\n print \"Generating children for %s (%s rows)\" % (self.bbox, self.count)\n\n self.children = [QuadtreeNode(self.tree, b)\n for b in self.bounds.get_children()]\n\n with utils.msgpack_open(self.source_filename) as f:\n with utils.msgpack_open(self.children[0].source_filename, \"w\") as self.children[0].file:\n with utils.msgpack_open(self.children[1].source_filename, \"w\") as self.children[1].file:\n with utils.msgpack_open(self.children[2].source_filename, \"w\") as self.children[2].file:\n with utils.msgpack_open(self.children[3].source_filename, \"w\") as self.children[3].file:\n for row in f:\n for child in self.children:\n if self.tree.latitude_col in row and self.tree.longitude_col in row and child.bbox.contains(row[self.tree.longitude_col], row[self.tree.latitude_col]):\n child.file.write(row)\n child.count += 1\n break\n for child in self.children:\n del child.file\n\n return self.children",
"def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))",
"def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret",
"def children(self) -> Iterable[Heirarchical]:\n return []",
"def Children(self) -> _n_1_t_2:",
"def _get_child_meshes(obj):\n\tif obj.type == 'MESH':\n\t\treturn [obj], []\n\telse:\n\t\tmeshes, other = [], [obj]\n\t\tfor child in obj.children:\n\t\t\tchild_meshes, child_other = _get_child_meshes(child)\n\t\t\tmeshes += child_meshes\n\t\t\tother += child_other\n\n\t\treturn meshes, other",
"def createChildren(self):\n children = []\n posMoves = self.checkPossibleMoves()\n\n for i in range(self.nrOfCars):\n if len(posMoves[i]) > 0:\n for j in range(len(posMoves[i])):\n child = copy(self.changeable)\n child[i] = child[i] + posMoves[i][j]\n children.append(child)\n\n return children",
"def make_children(self):\r\n\t\tchildren = []\r\n\r\n\t\tposMoves = self.generate_possible_moves()\r\n\r\n\t\tfor direction in posMoves:\r\n\t\t\tnewChild = self.copy()\r\n\r\n\t\t\tnewChild.make_move(direction)\r\n\t\t\tnewChild.steps += 1\r\n\t\t\tnewChild.generate_heuristic()\r\n\t\t\tnewChild.eqHash = hash(str(newChild))\r\n\r\n\t\t\tchildren.append(newChild)\r\n\r\n\t\treturn children",
"def create_children(self):\n actionCount = len(self.availableActions)\n self.children = [None] * actionCount\n\n # Split creation into multiple threads if this is the master node.\n if self.level == 0 and USE_THREADS:\n threads = [None] * actionCount\n for idx in range(actionCount):\n threads[idx] = threading.Thread(target=create_child, args=(self, idx))\n threads[idx].start()\n for t in threads:\n t.join()\n else:\n for idx in range(actionCount):\n create_child(self, idx)\n # Stop making child branches if the most recent child branch already found lethal.\n if self.children[idx].get_max_win_strength() == WIN_VALUE:\n self.children = self.children[:idx+1]\n break",
"def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)",
"def addChildren( self, tree ):\n \n children = []\n for x in range(0,len(tree)):\n children.append([])\n \n for node, parent, level, ranges in tree:\n if node != 0:\n children[parent].append(node)\n \n new_tree = []\n for x in range(0,len(tree)):\n new_tree.append( [list(tree[x]), list(children[x])] )\n \n return new_tree",
"def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances",
"def setup_children(self):\n # Only generate new children if there are none\n if len(self.children) == 0:\n # Create the encoder and decoder genes\n encoder = EncoderGene(name='encoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n self.children = [encoder]\n\n decoder = DecoderGene(name='decoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n\n self.children.append(decoder)\n\n pass",
"def init_children(self):\n children = []\n legal_moves = list(chess.Board(self.state).legal_moves)\n for move in legal_moves:\n temp_board = chess.Board(self.state)\n temp_board.push_san(str(move))\n children.append(Node(temp_board.fen(), self))\n self.children = children",
"def children(self): # noqa: ANN201",
"def __initChild(self):\n if self.__child is None:\n self.__child = []\n self._populateChild()",
"def init_hierarchy(self):\n self.create_row(0)\n self.dataset_name = os.path.basename(self.root_path).strip('.zarr')\n\n self.plate_meta['plate'] = {'acquisitions': [{'id': 1,\n 'maximumfieldcount': 1,\n 'name': 'Dataset',\n 'starttime': 0}],\n 'columns': [],\n 'field_count': 1,\n 'name': self.dataset_name,\n 'rows': [],\n 'version': '0.1',\n 'wells': []}\n\n self.plate_meta['plate']['rows'].append({'name': self.rows[0]})\n\n self.well_meta['well'] = {'images': [], 'version': '0.1'}\n self.well_meta = dict(self.well_meta)",
"def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children",
"def get_array_of_children(self):\n children = [self.posXposYposZ,self.posXposYnegZ,self.posXnegYposZ,self.posXposYnegZ,self.negXposYposZ,self.negXposYnegZ,self.negXnegYposZ,self.negXnegYnegZ ] \n return children",
"def create_hierarchy(self):\n\t\tpass",
"def construct(self):\n self._content.sort(key=lambda x: (x.parent, x.index))\n i=0\n j=1\n while i<len(self._content):\n while j<len(self._content):\n if self._content[j].parent == self._content[i].index:\n self._content[i].children.append(self._content[j])\n j+=1\n else:\n break\n i+=1",
"def get_children(self):\n return []",
"def make_tree(\n self,\n recursive: bool = True\n ) -> list:\n children = []\n for file in self.path.iterdir():\n path = file\n\n if path.is_dir() and recursive:\n # try create Study\n try:\n children.append(Study(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotStudyFolder:\n pass\n # try create Experiment\n try:\n children.append(Experiment(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotExperimentFolder:\n pass\n #try create Processing\n try:\n children.append(Processing(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotProcessingFolder:\n pass\n children.append(Folder(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n try:\n if path.name in self._dataset_index:\n children.append(Dataset(path, **self._dataset_state))\n continue\n except (UnsuportedDatasetType, IncompleteDataset, NotADatasetDir):\n pass\n try:\n children.append(JCAMPDX(path, load=False))\n continue\n except (InvalidJcampdxFile, JcampdxVersionError):\n pass\n return children",
"def _getChildrenBom(self, component, level=0, currlevel=0):\n result = []\n bufferdata = []\n if level == 0 and currlevel > 1:\n return bufferdata\n for bomid in component.product_tmpl_id.bom_ids:\n for bomline in bomid.bom_line_ids:\n children=self._getChildrenBom(bomline.product_id, level, currlevel+1)\n bufferdata.extend(children)\n bufferdata.append(bomline.product_id.id)\n result.extend(bufferdata)\n return getCleanList(result)",
"def child_properties(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.property_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result",
"def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items",
"def get_children(self):\n return self.children",
"def get_children(self):\n if not self.FileInfo:\n raise StopIteration(\"No children\")\n offset = self.offset_pad(self.FileInfo.obj_offset + self.ValueLength)\n return self._recurse_children(offset)",
"def read_hierarchy(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n \r\n while lin != 'end':\r\n parts = lin.split()\r\n if lin != 'begin':\r\n ind = self.get_index_by_name(parts[0])\r\n for i in range(1, len(parts)):\r\n self.vertices[ind].children.append(self.get_index_by_name(parts[i]))\r\n lin = self.read_line(fid)\r\n lin = self.read_line(fid)\r\n return lin"
] |
[
"0.7406418",
"0.6090226",
"0.5948918",
"0.58916914",
"0.5828866",
"0.58213186",
"0.5682286",
"0.5663575",
"0.5655514",
"0.5655187",
"0.56005794",
"0.55924815",
"0.5587395",
"0.55825055",
"0.5578867",
"0.55643857",
"0.55631024",
"0.55561423",
"0.55486053",
"0.55143625",
"0.5511066",
"0.5502021",
"0.54998416",
"0.54562247",
"0.5453221",
"0.5452607",
"0.5448342",
"0.5423944",
"0.54084426",
"0.54006386"
] |
0.7425512
|
0
|
Write a condensed skid.stat-style ASCII file from the ahf_halos file: header + 1 halo per line. Should reproduce Alyson's IDL script, except that it does not fill the last 2 columns (Is it a satellite? Is the central halo falsely split?). Output units are Mpc, Msun, km/s. The user can specify their own hubble constant, hubble=(H0/(100 km/s/Mpc)), which overrides the snapshot's hubble constant (the latter sometimes has a large roundoff error).
|
def writestat(self, outfile=None, hubble=None):
s = self._base()
mindarkmass = min(s.dark['mass'])
if hubble is None:
hubble = s.properties['h']
if outfile is None: outfile = self._base().filename+'.stat'
print "write stat file to ", outfile
fpout = open(outfile, "w")
header = "#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A"
print >> fpout, header
for ii in np.arange(self._nhalos)+1:
print '%d '%ii,
sys.stdout.flush()
h = self[ii].properties # halo index starts with 1 not 0
        ## 'contam' means the halo contains more than one dark matter particle mass.
icontam = np.where(self[ii].dark['mass'] > mindarkmass)
if (len(icontam[0]) > 0):
contam = "contam"
else:
contam = "clean"
        ## may want to implement a satellite test and a false central breakup test.
ss = " " # can adjust column spacing
outstring = str(ii)+ss
outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss
outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss
outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss
outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss
outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss
outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss
outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss
outstring += str(h['vrms'])+ss
## pos: convert kpc/h to mpc (no h).
outstring += str(h['pos'][0][0]/hubble)+ss
outstring += str(h['pos'][0][1]/hubble)+ss
outstring += str(h['pos'][0][2]/hubble)+ss
outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss
outstring += str(h['vel'][0][2])+ss
outstring += contam+ss
outstring += "unknown" + \
ss # unknown means sat. test not implemented.
outstring += "unknown"+ss # false central breakup.
print >> fpout, outstring
fpout.close()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def writehalos(self, snapshot, halos, hubble=None, outfile=None):\n s = snapshot\n grpoutfile = s.filename + \".amiga.grp\"\n statoutfile = s.filename + \".amiga.stat\"\n tipsyoutfile = s.filename + \".amiga.gtp\"\n halos.writegrp(s, halos, grpoutfile)\n halos.writestat(s, halos, statoutfile, hubble=hubble)\n shalos = halos.writetipsy(s, halos, tipsyoutfile, hubble=hubble)\n return shalos",
"def writestat(self, snapshot, halos, statoutfile, hubble=None):\n s = snapshot\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n outfile = statoutfile\n logger.info(\"Writing stat file to %s\" % statoutfile)\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n nhalos = halos._nhalos\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties # halo index starts with 1 not 0\n # 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(halos[ii + 1].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n # may want to add implement satellite test and false central\n # breakup test.\n\n n_dark = h['npart'] - h['n_gas'] - h['n_star']\n M_dark = h['mass'] - h['M_gas'] - h['M_star']\n ss = \" \" # can adjust column spacing\n outstring = str(int(h['halo_id'])) + ss\n outstring += str(int(h['npart'])) + ss + str(int(h['n_gas'])) + ss\n outstring += str(int(h['n_star'])) + ss + str(int(n_dark)) + ss\n outstring += str(h['mass'] / hubble) + ss + \\\n str(h['Rvir'] / hubble) + ss\n outstring += str(h['M_gas'] / hubble) + ss + \\\n str(h['M_star'] / hubble) + ss\n outstring += str(M_dark / hubble) + ss\n outstring += str(h['Vmax']) + ss + str(h['Rmax'] / hubble) + ss\n outstring += str(h['sigV']) + ss\n # pos: convert kpc/h to mpc (no h).\n outstring += str(h['Xc'] / hubble / 1000.) + ss\n outstring += str(h['Yc'] / hubble / 1000.) + ss\n outstring += str(h['Zc'] / hubble / 1000.) + ss\n outstring += str(h['VXc']) + ss + \\\n str(h['VYc']) + ss + str(h['VZc']) + ss\n outstring += contam + ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\" + ss # false central breakup.\n print >> fpout, outstring\n fpout.close()\n return 1",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def generate_hdf(sav_file, instr, lamps, outfil, dtoler=0.6):\n from pypit import pyputils\n msgs = pyputils.get_dummy_logger()\n\n from pypit import arwave\n from pypit import arutils\n arutils.dummy_settings()\n #\n from arclines.pypit_utils import find_peaks\n from arclines.io import load_line_lists\n #\n\n # Read IDL save file\n sav_file = os.getenv('LONGSLIT_DIR')+'calib/linelists/'+sav_file\n s = readsav(sav_file)\n ctbl = Table(s['calib']) # For writing later\n\n # Line list\n alist = load_line_lists(lamps)\n\n # One spectrum?\n ashape = s['archive_arc'].shape\n if len(ashape) == 1:\n nspec = 1\n npix = ashape[0]\n else:\n nspec = s['archive_arc'].shape[0]\n npix = ashape[1]\n\n # Meta data\n mdict = dict(npix=npix, instr=instr,\n lamps=[str(ilamp) for ilamp in lamps], # For writing to hdf5\n nspec=nspec, infil=sav_file, IDairvac='vac')\n print(\"Processing {:d} spectra in {:s}\".format(mdict['nspec'], sav_file))\n\n # Start output\n outh5 = h5py.File(out_path+outfil, 'w')\n outh5.create_group('arcs')\n\n # Loop on spectra\n for ss in range(mdict['nspec']):\n sss = str(ss)\n # Parse\n if nspec == 1:\n spec = s['archive_arc']\n else:\n spec = s['archive_arc'][ss]\n calib = s['calib'][ss]\n # Peaks\n tampl, tcent, twid, w, yprep = find_peaks(spec)\n pixpk = tcent[w]\n pixampl = tampl[w]\n\n # Wavelength solution\n try:\n cfunc = calib['func'].decode('UTF-8')\n except:\n cfunc = calib['func']\n if cfunc == 'CHEBY':\n wv_air = cheby_val(calib['ffit'], np.arange(mdict['npix']),\n calib['nrm'], calib['nord'])\n elif cfunc == 'POLY':\n wv_air = poly_val(calib['ffit'], np.arange(mdict['npix']),\n calib['nrm'])\n else:\n pdb.set_trace()\n raise ValueError(\"Bad calib\")\n # Check blue->red or vice-versa\n if ss == 0:\n if wv_air[0] > wv_air[-1]:\n mdict['bluered'] = False\n else:\n mdict['bluered'] = True\n\n # Peak waves\n if calib['func'] == 'CHEBY':\n twave_air = cheby_val(calib['ffit'], pixpk,\n calib['nrm'], calib['nord'])\n else:\n twave_air = poly_val(calib['ffit'], pixpk, calib['nrm'])\n # Air to Vac\n twave_vac = arwave.airtovac(twave_air*u.AA)\n wave_vac = arwave.airtovac(wv_air*u.AA)\n if ss == 0:\n disp = np.median(np.abs(wave_vac-np.roll(wave_vac,1)))\n print(\"Average dispersion = {:g}\".format(disp))\n # IDs\n idwv = np.zeros_like(pixpk)\n idsion = np.array([str('12345')]*len(pixpk))\n for kk,twv in enumerate(twave_vac.value):\n # diff\n diff = np.abs(twv-alist['wave'])\n if np.min(diff) < dtoler:\n imin = np.argmin(diff)\n idwv[kk] = alist['wave'][imin]\n #idsion[kk] = alist['Ion'][imin] NIST\n idsion[kk] = alist['ion'][imin]\n # Red to blue?\n if mdict['bluered'] is False:\n pixpk = mdict['npix']-1 - pixpk\n # Re-sort\n asrt = np.argsort(pixpk)\n pixpk = pixpk[asrt]\n idwv = idwv[asrt]\n # Reverse\n spec = spec[::-1]\n wave_vac = wave_vac[::-1]\n # Output\n outh5['arcs'].create_group(sss)\n # Datasets\n outh5['arcs'][sss]['wave'] = wave_vac\n outh5['arcs'][sss]['wave'].attrs['airvac'] = 'vac'\n outh5['arcs'][sss]['spec'] = spec\n outh5['arcs'][sss]['spec'].attrs['flux'] = 'counts'\n outh5['arcs'][sss]['pixpk'] = pixpk\n outh5['arcs'][sss]['ID'] = idwv\n outh5['arcs'][sss]['ID'].attrs['airvac'] = 'vac'\n outh5['arcs'][sss]['Ion'] = str(idsion)\n # LR wavelengths\n outh5['arcs'][sss]['LR_wave'] = wv_air\n outh5['arcs'][sss]['LR_wave'].attrs['airvac'] = 'air'\n # LR Fit\n outh5['arcs'][sss].create_group('LR_fit')\n for key in ctbl.keys():\n outh5['arcs'][sss]['LR_fit'][key] = ctbl[ss][key]\n\n # Meta data\n outh5.create_group('meta')\n for key in mdict.keys():\n try:\n 
outh5['meta'][key] = mdict[key]\n except TypeError: # Probably a unicode thing\n if isinstance(mdict[key], list):\n if isinstance(mdict[key][0], basestring):\n tmp = [bytes(item, 'utf-8') for item in mdict[key]]\n else:\n tmp = mdict[key]\n elif isinstance(mdict[key], basestring):\n tmp = str(mdict[key])\n try:\n outh5['meta'][key] = tmp\n except TypeError:\n pdb.set_trace()\n # Close\n outh5.close()\n print('Wrote {:s}'.format(out_path+outfil))",
"def create_pythia_cmnd_files(self):\n \n for higgsname, higgspid in {'H': 35, 'A': 36}.iteritems():\n \n # Get mass and width from 2HDMC LHA file\n lha = LHA(self.lhafile)\n mass = lha.get_block('MASS').get_entry_by_key(higgspid)\n width = lha.get_decay(higgspid).width \n \n outname = self.lhafile.replace('.lha', '_%s.cmnd' % higgsname)\n self.cmndfiles[higgsname] = outname\n \n # Write command file\n with open(outname, 'w') as outfile:\n \n outfile.write('Beams:eCM = 13000.\\n')\n outfile.write('Higgs:useBSM = on\\n')\n \n if higgspid == 36:\n #outfile.write('HiggsBSM:allA3 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2A3 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2A3 = on\\n') # gluon fusion\n elif higgspid == 35:\n #outfile.write('HiggsBSM:allH2 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2H2 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2H2 = on\\n') # gluon fusion\n \n outfile.write('{}:all = A0 A0 1 0 0 {} {} 50.0 0.0\\n'.format(higgspid, mass, width))\n outfile.write('{}:onMode = off\\n'.format(higgspid))\n outfile.write('{}:onIfMatch = 15 -15\\n'.format(higgspid))\n \n outfile.write('15:onMode = off\\n')\n outfile.write('15:onIfMatch = 16 111 211\\n')\n outfile.write('\\n')\n outfile.write('Next:numberShowEvent = 0\\n')\n\n return 0",
"def loadSHandSTH(filename, endian=None):\n # read file with obspy (headers only)\n seis = _read_segy(filename, endian=endian, headonly=True)\n traces = seis.traces\n ntraces = len(traces)\n\n # Load SEGY header\n SH = loadSEGYHeader(seis)\n\n # additional headers for compatibility with older segy module\n SH['filename'] = filename\n SH[\"ntraces\"] = ntraces\n SH[\"ns\"] = SH['number_of_samples_per_data_trace']\n SH[\"dt\"] = SH['sample_interval_in_microseconds'] / 1000 # in milliseconds\n\n # Load all the Trace headers in arrays\n STH = loadSEGYTraceHeader(traces)\n\n return SH, STH",
"def writeHeading(fil, nodes, elems, text=''): #currently only for hexahedral mesh\n fil.write(\" CONTROL INFO 2.2.30\\n\")\n fil.write(\"** GAMBIT NEUTRAL FILE\\n\")\n fil.write('%s\\n' %text)\n fil.write('PROGRAM: Gambit VERSION: 2.2.30\\n')\n fil.write(strftime('%d %b %Y %H:%M:%S\\n', gmtime()))\n fil.write(' NUMNP NELEM NGRPS NBSETS NDFCD NDFVL\\n')\n fil.write('%10i%10i%10i%10i%10i%10i\\n' % (shape(nodes)[0],shape(elems)[0],1,0,3,3))\n fil.write('ENDOFSECTION\\n')",
"def ROCKSTAR_binary():\n header_size = 256 #Bytes, size of the header\n halo_struct_size = 264 #Bytes, properties stored for one halo using dtype structure dt (260 from struct 'halo' in halo.h from ROCKSTAR and \n #4 bytes probably from max_metric from struct 'extra_halo_info' in halo.h)\n bytes_to_header_info = 64 #bytes until the header info starts\n \n dt_header_info = [ \n ('n_halos' , np.int64), #total number of halos in this file\n ('tot_n_particles' , np.int64), #total number of particles in this file \n ('box_size' , np.float32), #side lenght in Mpc/h of simulation box\n ('m_particles' , np.float32), #mass of one particle in h-1Msun\n ('type_particles' , np.int64) #type of particle (either 1=halo, star, gas etc.) \n ]\n \n dt = [\n ('haloid' , np.int64), #int64_t id\n ('x_pos' , np.float32), #float pos[6], 1\n ('y_pos' , np.float32), #float pos[6], 2\n ('z_pos' , np.float32), #float pos[6], 3\n ('pos4' , np.float32), #float pos[6], 4\n ('pos5' , np.float32), #float pos[6], 5\n ('pos6' , np.float32), #float pos[6], 6 \n ('x_corevel' , np.float32), #float corevel[3], 1\n ('y_corevel' , np.float32), #float corevel[3], 2\n ('z_corevel' , np.float32), #float corevel[3], 3 \n ('x_vel_bulk' , np.float32), #float bulkvel[3], 1\n ('y_vel_bulk' , np.float32), #float bulkvel[3], 2\n ('z_vel_bulk' , np.float32), #float bulkvel[3], 3\n ('mhalo' , np.float32), #float m \n ('rvir' , np.float32), #float r \n ('rvir_child' , np.float32), #float child_r\n ('vmax_r' , np.float32), #float vmax_r\n ('mhalo_bound' , np.float32), #float mgrav\n ('vmax' , np.float32), #float vmax\n ('vpeak' , np.float32), #float rvmax\n ('rscale' , np.float32), #float rs\n ('rscale_Klypin' , np.float32), #float klypin_rs\n ('vrms' , np.float32), #float vrms\n ('x_ang' , np.float32), #float J[3], 1\n ('y_ang' , np.float32), #float J[3], 2\n ('z_ang' , np.float32), #float J[3], 3\n ('energy' , np.float32), #float energy \n ('spinParameter' , np.float32), #float spin\n ('mhalo_200b' , np.float32), #float alt_m[4], 1 \n ('mhalo_200c' , np.float32), #float alt_m[4], 2 \n ('mhalo_500c' , np.float32), #float alt_m[4], 3 \n ('mhalo_2500c' , np.float32), #float alt_m[4], 4 \n ('x_off' , np.float32), #float Xoff\n ('v_off' , np.float32), #float Voff\n ('b_to_a' , np.float32), #float b_to_a \n ('c_to_a' , np.float32), #float c_to_a\n ('x_a' , np.float32), #float A[3], 1\n ('y_a' , np.float32), #float A[3], 2\n ('z_a' , np.float32), #float A[3], 3 \n ('b_to_a_500c' , np.float32), #float b_to_a2\n ('c_to_a_500c' , np.float32), #float c_to_a2\n ('x_a_500c' , np.float32), #float A2[3], 1 \n ('y_a_500c' , np.float32), #float A2[3], 2\n ('z_a_500c' , np.float32), #float A2[3], 3 \n ('spin_Bullock' , np.float32), #float bullock_spin\n ('T_U' , np.float32), #float kin_to_pot\n ('Mpseudo_Behroozi', np.float32), #float m_pe_b \n ('Mpseudo_Diemer' , np.float32), #float m_pe_d\n ('rhalf_mass' , np.float32), #float halfmass_radius\n ('n_particles' , np.int64), #int64_t num_p\n ('n_particles_child', np.int64), #int64_t num_child_particles \n ('p_start' , np.int64), #int64_t p_start\n ('descIndex' , np.int64), #int64_t desc\n ('flags' , np.int64), #int64_t flags\n ('n_core' , np.int64), #int64_t n_core\n ('PosUncertainty' , np.float32), #float min_pos_err\n ('VelUncertainty' , np.float32), #float min_vel_err\n ('BulkVelUnc' , np.float32), #float min_bulkvel_err\n ('mmetric' , np.float32) #unclear where it comes from, it might be mmetric \n ]\n \n return header_size, halo_struct_size, dt, dt_header_info, bytes_to_header_info",
"def prepare_sushi_input(self):\n \n for higgsname, higgstype in {'H': 12, 'A': 21}.iteritems():\n \n # Parse LHA file\n lha = LHA(self.lhafile)\n \n # Add SusHi-specific blocks\n sushi = Block('SUSHI', comment='SusHi specific')\n sushi.add(Entry([1, 2], comment='Select 2HDM'))\n sushi.add(Entry([2, higgstype], comment='h / H / A'))\n sushi.add(Entry([3, 0], comment='p-p collisions'))\n sushi.add(Entry([4, 13000], comment='E_cm'))\n sushi.add(Entry([5, 2], comment='ggH at NNLO'))\n sushi.add(Entry([6, 2], comment='bbH at NNLO'))\n sushi.add(Entry([7, 2], comment='SM EW content'))\n sushi.add(Entry([19, 1], comment='Verbosity'))\n sushi.add(Entry([20, 0], comment='All processes'))\n lha.add_block(sushi)\n \n # 2HDM block\n thdm = Block('2HDM', '2HDM parameters')\n thdm.add(Entry([1], comment='Type I'))\n lha.add_block(thdm)\n \n # Kinematic distribution parameters\n distrib = Block('DISTRIB', comment='Kinematic requirements')\n distrib.add(Entry([1, 0], comment='Sigma total'))\n distrib.add(Entry([2, 0], comment='Disable pT cut'))\n #distrib.add(Entry([21, GENER_SETTINGS['higgs_pt_min']], comment='Min higgs pT'))\n distrib.add(Entry([3, 0], comment='Disable eta cut'))\n #distrib.add(Entry([32, GENER_SETTINGS['higgs_eta_max']], comment='Max eta'))\n distrib.add(Entry([4, 1], comment='Use eta, not y'))\n lha.add_block(distrib)\n \n # PDF selection\n pdfspec = Block('PDFSPEC')\n pdfspec.add(Entry([1, 'MMHT2014lo68cl.LHgrid'], comment='Name of pdf (lo)'))\n pdfspec.add(Entry([2, 'MMHT2014nlo68cl.LHgrid'], comment='Name of pdf (nlo)'))\n pdfspec.add(Entry([3, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (nnlo)'))\n pdfspec.add(Entry([4, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (n3lo)'))\n pdfspec.add(Entry([10, 0], comment='Set number'))\n lha.add_block(pdfspec)\n \n # Add charm mass\n lha.get_block('SMINPUTS').add(Entry([8, 1.275], comment='m_c'))\n \n # Write output\n suffix = '_%s_sushi.in' % higgsname\n outname = self.lhafile.replace('.lha', suffix)\n self.sushi_input[higgsname] = outname\n \n lha.write(outname)\n \n return 0",
"def save_header_default(filename, nhalos_per_tree):\n ntrees = len(nhalos_per_tree)\n nhalos = np.sum(nhalos_per_tree)\n dtype1 = np.dtype([('ntrees', 'i4'), ('totnhalos', 'i4')])\n x1 = np.array([(ntrees, nhalos)], dtype=dtype1)\n x2 = nhalos_per_tree.astype('i4')\n header_size = x1.nbytes + x2.nbytes\n # Open\n if isinstance(filename, str):\n fd = open(filename, 'wb')\n close = True\n else:\n fd = filename\n close = False\n # Write\n x1.tofile(fd)\n x2.tofile(fd)\n # Close\n if close:\n fd.close()\n return header_size",
"def writeto(self, output):\n\n hdu = pyfits.PrimaryHDU(data=self.integrated_psf)\n (year, month, day, hour, minute, second, weekday, DOY, DST) = \\\n time.gmtime()\n hdu.header.update(\"DATE\", \"%4d-%02d-%02dT%02d:%02d:%02d\" %\n (year, month, day, hour, minute, second))\n hdu.header.update(\"FILENAME\", os.path.basename(output),\n comment=\"Name of this file\")\n hdu.header.update(\"INSTRUME\", self.instrument, \"Instrument name\")\n\n # Copy some specific keywords from the input header.\n ihdr = self.header\n if \"BUNIT\" in ihdr:\n hdu.header.update(\"BUNIT\", ihdr.get(\"BUNIT\"))\n if \"ERR_BUDG\" in ihdr:\n hdu.header.update(\"ERR_BUDG\", ihdr.get(\"ERR_BUDG\"),\n comment=\"Optical error budget version number\")\n if \"SI_FP\" in ihdr:\n hdu.header.update(\"SI_FP\", ihdr.get(\"SI_FP\"),\n comment=\"Focal plane for OPD calculation\")\n if \"OPD_WFE\" in ihdr:\n hdu.header.update(\"OPD_WFE\", ihdr.get(\"OPD_WFE\"),\n comment=\"OPD wavefront error (nm)\")\n if \"W\" in ihdr:\n hdu.header.update(\"W\", ihdr.get(\"W\"),\n comment=\"Flat width of hex segment (m)\")\n if \"GAP\" in ihdr:\n hdu.header.update(\"GAP\", ihdr.get(\"GAP\"),\n comment=\"Gap width between hex segments (m)\")\n if \"EDGE\" in ihdr:\n hdu.header.update(\"EDGE\", ihdr.get(\"EDGE\"),\n comment=\"Edge roll off (m)\")\n if \"SW\" in ihdr:\n hdu.header.update(\"SW\", ihdr.get(\"SW\"),\n comment=\"Obscuring strut width (m)\")\n if \"HTS\" in ihdr:\n hdu.header.update(\"HTS\", ihdr.get(\"HTS\"),\n comment=\"Height of segment isogrid\")\n if \"HT2\" in ihdr:\n hdu.header.update(\"HT2\", ihdr.get(\"HT2\"),\n comment=\"Height of secondary isogrid\")\n if \"HT3\" in ihdr:\n hdu.header.update(\"HT3\", ihdr.get(\"HT3\"),\n comment=\"Height of tertiary isogrid\")\n if \"FL\" in ihdr:\n hdu.header.update(\"FL\", ihdr.get(\"FL\"),\n comment=\"Focal length (m)\")\n\n # Add some keywords.\n if self.phase_file is not None:\n hdu.header.update(\"PHASE\", os.path.basename(self.phase_file),\n \"Name of phase image file\")\n if self.pupil_file is not None:\n hdu.header.update(\"PUPIL\", os.path.basename(self.pupil_file),\n \"Name of pupil image file\")\n hdu.header.update(\"OVERSAMP\", self.oversample, \"Oversampling factor\")\n hdu.header.update(\"CALCTYPE\", self.type,\n \"32 = single precision, 64 = double precision\")\n hdu.header.update(\"DIAMETER\", self.D, \"pupil diameter (meters)\")\n hdu.header.update(\"ORIG_NX\", self.header[\"naxis1\"],\n \"NAXIS1 in input image\")\n hdu.header.update(\"ORIG_NY\", self.header[\"naxis2\"],\n \"NAXIS2 in input image\")\n\n self.putCoordInfo(hdu)\n\n (wavelengths, weights) = self.filter\n if len(wavelengths) >= 99:\n root_wln = \"WAV\"\n root_wgt = \"WGT\"\n else:\n root_wln = \"WAVELN\"\n root_wgt = \"WEIGHT\"\n for i in range(len(wavelengths)):\n keyword = \"%s%d\" % (root_wln, i + 1)\n hdu.header.update(keyword, wavelengths[i],\n \"wavelength in microns\")\n keyword = \"%s%d\" % (root_wgt, i + 1)\n hdu.header.update(keyword, weights[i], \"weight\")\n\n ofd = pyfits.HDUList(hdu)\n try:\n ofd.writeto(output)\n except IOError as message:\n print(\"ERROR: Output file has NOT been written; \" \\\n \"use <psf>.writeto(output)\")\n print(message)\n return\n self.output_written = True",
"def extract_hrc_data(obsid, data_dir):\n#\n#--- extract fits data\n#\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=hrc\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n#\n#--- create directories and move the data into them\n#\n cmd = 'mkdir primary secondary'\n os.system(cmd)\n \n cmd = 'mv *dtf1*fits* *fov*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'mv *bpix1*fits* *evt1*fits* *msk1*fits* *mtl1*fits* \\\n *std_dtfstat1.fits* *std_flt1.fits* ./secondary/.'\n os.system(cmd)\n\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=pcad\\n'\n line = line + 'subdetector=aca\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n cmd = 'mv *asol*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'rm -rf *fits* zline zout'\n os.system(cmd)\n\n hdir = data_dir + '/' + str(obsid)\n if os.path.isdir(hdir):\n cmd = 'rm -rf ' + hdir + '/*'\n os.system(cmd)\n else:\n cmd = 'mkdir ' + hdir \n os.system(cmd)\n\n cmd = 'chmod 774 primary/* secondary/*'\n os.system(cmd)\n\n#\n#--- check whether there are duplicated fits files extracted; if so, remove older ones\n#\n h_list = ['dtf1', 'fov1', 'asol1']\n sdir = 'primary'\n remove_duplicate(h_list, sdir)\n\n h_list = ['bpix1', 'evt1', 'msk1', 'mtl1', 'std_dtfstat1', 'std_flt1']\n sdir = 'secondary'\n remove_duplicate(h_list, sdir)\n\n cmd = 'mv primary secondary ' + hdir + '/.'\n os.system(cmd)\n\n cmd = 'rm -rf ' + hdir + '/analysis/* ' \n os.system(cmd)\n\n return check_data_exist(hdir)",
"def fix_hppos(f):\n with open(f, 'r+') as file:\n d = file.readlines()\n file.seek(0)\n for i in d[:-1]: # Write all but last line\n file.write(i)\n l = d[-1].split(' ')\n if len(l) == 5: # If final line is complete, write it too\n file.write(d[-1])\n file.truncate() # Remove bad stuff",
"def get_hershey():\n hershey_path = pkg_resources.resource_filename('pymicrofluidics', 'data/hershey.txt')\n hershey_table = {}\n first = True\n with open(hershey_path) as openfileobject:\n for tline in openfileobject:\n if re.search('Ascii',tline):\n if first == False:\n newline = hershey_table[asci]['coord'].split('-1,-1,')\n newline = [list(filter(None, x.split(','))) for x in newline if len(x)>0]\n hershey_table[asci]['coord'] = [np.array([[float(y[x]),float(y[x+1])] for x in range(0,len(y)-1,2)])/21 for y in newline]\n if len(hershey_table[asci]['coord'])>0:\n middle = 0.5*(np.max(np.concatenate(hershey_table[asci]['coord'])[:,0])+np.min(np.concatenate(hershey_table[asci]['coord'])[:,0]))\n #middle = float(middle)\n hershey_table[asci]['coord'] = [np.array([[x[0]-middle,x[1]] for x in y]) \n for y in hershey_table[asci]['coord']]\n hershey_table[asci]['width'] = np.max(np.concatenate(hershey_table[asci]['coord'])[:,0])-np.min(np.concatenate(hershey_table[asci]['coord'])[:,0])\n else:\n hershey_table[asci]['width'] = 0.5\n asci = int(re.findall('.*Ascii (\\d+).*',tline)[0])\n width = float(re.findall('\\d+,\\s*(\\d+),.*',tline)[0])\n hershey_table[asci] = {'coord': '', 'width': width}\n first = False\n else:\n newline = tline.rstrip('\\n')\n hershey_table[asci]['coord'] = hershey_table[asci]['coord']+newline\n return hershey_table",
"def updateSubhalos_old(host, file):\n f = open(file, 'r')\n line = f.readline()\n i = 0\n while line != '':\n if line[0:5] == \"#tree\":\n #if i%10000 == 0:\n #print 'subhalo finder scanned ', i, ' trees'\n i+=1\n num = int(line[6::])\n # Deal with a=0 halo independently\n line = f.readline()\n sub = MTH.MTHalo(line)\n if sub.pid == host.ID: # not upid. only subhalos, not subsub etc.\n #build tree, add to subhalo list of host\n tree = MT.MergerTree(file, num)\n tree.haloList.append(sub)\n if sub.num_prog ==0:\n tree.progenitors.append(sub)\n\n # Now deal with all other halos in the tree\n index = 1\n line = f.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = f.readline()\n index +=1\n # add a=1 subhalo to subhalo list of host (maybe should add tree?)\n host.subhalos.append(sub)\n else:\n line = f.readline()\n else:\n line = f.readline()\n f.close()",
"def _make_haloupdate(self, f, fixed, halos, **kwargs):\n return",
"def aluminum_hexathiohypodiphosphate():\n\n positions = [[0.000000, 0.000000, 0.000000],\n [0.500000, 0.000000, 0.500000],\n [0.000000, 0.500000, 0.000000],\n [0.000000, 0.000000, 0.500000],\n [0.197847, 0.276435, 0.101916],\n [0.197847, 0.723565, 0.898084],\n [0.802153, 0.276435, 0.898084],\n [0.802153, 0.723565, 0.101916],\n [0.776404, 0.800507, 0.601208],\n [0.776404, 0.199493, 0.398792],\n [0.223596, 0.800507, 0.398792],\n [0.223596, 0.199493, 0.601208]]\n\n species = ['Al','Al','P','P','S','S','S','S','S','S','S','S']\n\n bravais = 'orthorhombic'\n\n space_group = 16\n lattice_parameters = {'a': Set(5.71230345, 'angstrom'),\n 'b': Set(5.71644625, 'angstrom'),\n 'c': Set(11.46678755,'angstrom')}\n data = {'fractional': positions,\n 'species': species,\n 'lattice_parameters': lattice_parameters,\n 'space_group': ('', space_group),\n 'n_atoms': len(species)}\n\n return data",
"def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args",
"def HI_mass(mhalo,aa):\n zp1 = 1.0/aa\n zz = zp1-1\n # Set the parameters of the HOD, using the \"simple\" form.\n # MHI ~ M0 x^alpha Exp[-1/x] x=Mh/Mmin\n # from the Appendix of https://arxiv.org/pdf/1804.09180.pdf, Table 6.\n # Fits valid for 1<z<6:\n mcut= 1e10*(6.11-1.99*zp1+0.165*zp1**2)\n alp = (1+2*zz)/(2+2*zz)\n # Work out the HI mass/weight per halo -- ignore prefactor.\n xx = mhalo/mcut+1e-10\n mHI = xx**alp * np.exp(-1/xx)\n # Scale to some standard number in the right ball-park.\n mHI*= 2e9*np.exp(-1.9*zp1+0.07*zp1**2)\n # Return the HI masses.\n return(mHI)\n #",
"def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()",
"def odemis_to_hyperspy(filename='sampledata/cltest.h5',specbin=1) :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition2//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition2/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n cltype = None\r\n if 'Spectrum' in cdesc :\r\n cltype = 'spectrum'\r\n elif 'CL intensity' in cdesc:\r\n cltype = 'panchrom'\r\n\r\n print('<' + filename + '> original shape :' ,x.shape, cltype)\r\n\r\n # strip unused dimensions and transpose/ reverse index order\r\n if cltype == 'panchrom' :\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n # just an image..\r\n else :\r\n xx=x[:,0,0,:,:].transpose((2,1,0))\r\n\r\n if cltype == 'spectrum' :\r\n #interpolate data to linearize the wavelength scale\r\n w = f[shome + 'DimensionScaleC'].value *1e9\r\n wx = np.linspace(w.min(),w.max(),w.size)\r\n for i in np.arange(xx.shape[0]) :\r\n for k in np.arange(xx.shape[1]) :\r\n xx[i,k,:] = np.interp(wx,w,xx[i,k,:])\r\n\r\n wslope = wx[1]-wx[0]\r\n woffset = wx.min()\r\n #wx = np.arange(w.size)\r\n #wslope,woffset=np.polyfit(wx,w,1)\r\n s = hs.signals.Signal1D(xx)\r\n\r\n elif cltype == 'panchrom' :\r\n s = hs.signals.Signal2D(xx)\r\n else :\r\n print('unknown type')\r\n\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n if cltype == 'spectrum' :\r\n s.axes_manager[2].name = 'wavelength'\r\n s.axes_manager[2].units = 'nm'\r\n s.axes_manager[2].offset = woffset\r\n s.axes_manager[2].scale = wslope\r\n s.metadata.signal_type = 'CL'\r\n\r\n f.close()\r\n if (specbin > 1) and (cltype == 'spectrum'):\r\n return( s.rebin(scale=[1,1,specbin]) )\r\n else :\r\n return( s )\r\n #end odemis_to_hyperspy\r\n #######################\r",
"def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)",
"def test_single_hf():\n test_file = os.path.join(DATA_DIR, 'test08.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n pprint(info)\n assert info['prog'] == '17 1.0.1' # CRYSTAL version\n assert info['finished'] == 2 # finished without errors\n assert info['energy'] == -5.7132081224317E+02 * Ha # energy in eV\n assert info['k'] == '8x8x8' # Monkhorst-Pack mesh\n assert info['H'] == 'Hartree-Fock'\n assert info['ncycles'][0] == 6\n assert not info['electrons']['basis_set']['ecp']\n assert info['electrons']['basis_set']['bs']['Si']",
"def org_sil_check_os2_metrics_match_hhea(ttFont):\n\n filename = os.path.basename(ttFont.reader.file.name)\n\n # Check both OS/2 and hhea are present.\n missing_tables = False\n\n required = [\"OS/2\", \"hhea\"]\n for key in required:\n if key not in ttFont:\n missing_tables = True\n yield FAIL,\\\n Message(f'lacks-{key}',\n f\"{filename} lacks a '{key}' table.\")\n\n if missing_tables:\n return\n\n # OS/2 sTypoAscender and sTypoDescender match hhea ascent and descent\n if ttFont[\"OS/2\"].sTypoAscender != ttFont[\"hhea\"].ascent:\n yield FAIL,\\\n Message(\"ascender\",\n f\"OS/2 sTypoAscender ({ttFont['OS/2'].sTypoAscender})\"\n f\" and hhea ascent ({ttFont['hhea'].ascent})\"\n f\" must be equal.\")\n elif ttFont[\"OS/2\"].sTypoDescender != ttFont[\"hhea\"].descent:\n yield FAIL,\\\n Message(\"descender\",\n f\"OS/2 sTypoDescender ({ttFont['OS/2'].sTypoDescender})\"\n f\" and hhea descent ({ttFont['hhea'].descent})\"\n f\" must be equal.\")\n elif ttFont[\"OS/2\"].sTypoLineGap != ttFont[\"hhea\"].lineGap:\n yield FAIL,\\\n Message(\"lineGap\",\n f\"OS/2 sTypoLineGap ({ttFont['OS/2'].sTypoLineGap})\"\n f\" and hhea lineGap ({ttFont['hhea'].lineGap})\"\n f\" must be equal.\")\n else:\n yield PASS, (\"OS/2.sTypoAscender/Descender values\"\n \" match hhea.ascent/descent.\")",
"def check_ASRs(phifull,poscar,sposcar,filename):\n natoms=len(poscar[\"types\"])\n ntot=len(sposcar[\"types\"])\n \n f=StringIO.StringIO()\n tot=0\n nblocks=0\n for ll in xrange(ntot):\n tot+=phifull[0,0,0,0,1,2,3,ll]\n f.write(\"{:>20.10f}\\n\".format(tot))\n ffinal=open(filename,\"w\")\n ffinal.write(f.getvalue())\n f.close()\n ffinal.close()",
"def write_sff_header(header, fh, num=None):\r\n\r\n lines = [\"Common Header:\"]\r\n if (num is not None):\r\n header[\"# of Flows\"] = num\r\n\r\n lines.extend([\" %s:\\t%s\" % (param, header[param])\r\n for param in header])\r\n fh.write(\"\\n\".join(lines) + \"\\n\\n\")",
"def _write_halo_pars(self):\n # e.g. pars['lam'], pars['a']\n # should this be part of WCS?\n c1 = fits.BinTableHDU.from_columns(\n [fits.Column(name='lam', array=c._make_array(self.lam),\n format='E', unit=self.lam_unit)])\n c2 = fits.BinTableHDU.from_columns(\n [fits.Column(name='theta', array=c._make_array(self.theta),\n format='E', unit='arcsec')])\n c3 = fits.BinTableHDU.from_columns(\n [fits.Column(name='taux', array=c._make_array(self.taux),\n format='E', unit='')])\n return [c1, c2, c3]",
"def writeTableHeader(self, fileName, variant=0):\r\n # research\r\n w = slicer.modules.NeedleFinderWidget\r\n l = w.logic\r\n if not variant:\r\n l.exportEvaluation(['user','case','maxTipHD','maxHD', 'avgHD', 'stdHD', 'medHD',\r\n 'nNeedles','nOutliers','outliers',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n 'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)\r\n else:\r\n l.exportEvaluation(['user','case','tipHD','HD', 'man.-seg_', 'ID1', 'ID2',\r\n 'outlier?',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n #'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)",
"def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()",
"def dump_signal_hdf(args):\n # construct & open output HDF5:\n outfile = args.out if (args.out is not None) else \"./samples.hdf5\"\n hdf = h5py.File(outfile, 'w-') # (throw error if file already exists)\n scaled_gp = hdf.create_group('scaled')\n if args.segmentation:\n states_gp = hdf.create_group('states')\n\n # loop thru polya calls output file and append samples to HDF5:\n curr_read = None\n curr_samples = []\n if args.segmentation:\n curr_states = []\n for row in tqdm(PolyaIterator(args.polya)):\n # create a new read dataset based on current samples if detect a switch:\n if row['readname'] != curr_read:\n if curr_read is not None:\n try:\n scaled_gp.create_dataset(curr_read, data=np.array(curr_samples, dtype=np.float32))\n if args.segmentation:\n states_gp.create_dataset(curr_read, data=np.array(curr_states, dtype='S10'))\n except:\n pass\n # reset current read & samples\n curr_read = row['readname']\n curr_samples = []\n if args.segmentation:\n curr_states = []\n hdf.flush()\n # otherwise append raw sample:\n curr_samples.append(float(row['scaled']))\n if args.segmentation:\n curr_states.append(row['state'])\n # append final read & close HDF5 file handle:\n try:\n scaled_gp.create_dataset(curr_read, data=np.array(curr_samples, dtype=np.float32))\n if args.segmentation:\n states_gp.create_dataset(curr_read, data=np.array(curr_states, dtype='S10'))\n except:\n pass\n hdf.flush()\n hdf.close()\n\n # print finishing message:\n print(\"[dump_signal.py] HDF5 file of (scaled) picoampere signals written to: {}\".format(outfile))"
] |
[
"0.66609514",
"0.6492009",
"0.5822685",
"0.58220613",
"0.58087313",
"0.55292356",
"0.5503008",
"0.544077",
"0.5423525",
"0.53699297",
"0.53241825",
"0.5292673",
"0.5285925",
"0.52806836",
"0.5273414",
"0.5246686",
"0.52335835",
"0.5232767",
"0.51816946",
"0.5146505",
"0.5145152",
"0.51434064",
"0.51410306",
"0.5128516",
"0.5119018",
"0.5113539",
"0.5088516",
"0.50872546",
"0.5085352",
"0.5074739"
] |
0.6493145
|
1
|
write halos to tipsy file (write as stars) from ahf_halos file. returns a snapshot where each halo is a star particle. user can specify own hubble constant hubble=(H0/(100 km/s/Mpc)), ignoring the snapshot arg for hubble constant (which sometimes has a large roundoff error).
|
def writetipsy(self, outfile=None, hubble=None):
from . import analysis
from . import tipsy
from .analysis import cosmology
from snapshot import _new as new
import math
s = self._base()
if outfile is None: outfile = s.filename+'.gtp'
print "write tipsy file to ", outfile
sout = new(star=self._nhalos) # create new tipsy snapshot written as halos.
sout.properties['a'] = s.properties['a']
sout.properties['z'] = s.properties['z']
sout.properties['boxsize'] = s.properties['boxsize']
if hubble is None: hubble = s.properties['h']
sout.properties['h'] = hubble
### ! dangerous -- rho_crit function and unit conversions needs simplifying
rhocrithhco = cosmology.rho_crit(s, z=0, unit="Msol Mpc^-3 h^2")
lboxkpc = sout.properties['boxsize'].ratio("kpc a")
lboxkpch = lboxkpc*sout.properties['h']
lboxmpch = lboxkpc*sout.properties['h']/1000.
tipsyvunitkms = lboxmpch * 100. / (math.pi * 8./3.)**.5
tipsymunitmsun = rhocrithhco * lboxmpch**3 / sout.properties['h']
print "transforming ", self._nhalos, " halos into tipsy star particles"
for ii in xrange(self._nhalos):
h = self[ii+1].properties
sout.star[ii]['mass'] = h['m']/hubble / tipsymunitmsun
## tipsy units: box centered at 0. (assume 0<=x<=1)
sout.star[ii]['x'] = h['pos'][0][0]/lboxmpch - 0.5
sout.star[ii]['y'] = h['pos'][0][1]/lboxmpch - 0.5
sout.star[ii]['z'] = h['pos'][0][2]/lboxmpch - 0.5
sout.star[ii]['vx'] = h['vel'][0][0]/tipsyvunitkms
sout.star[ii]['vy'] = h['vel'][0][1]/tipsyvunitkms
sout.star[ii]['vz'] = h['vel'][0][2]/tipsyvunitkms
sout.star[ii]['eps'] = h['r']/lboxkpch
sout.star[ii]['metals'] = 0.
sout.star[ii]['phi'] = 0.
sout.star[ii]['tform'] = 0.
print "writing tipsy outfile %s"%outfile
sout.write(fmt=tipsy.TipsySnap, filename=outfile)
return sout
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def writehalos(self, snapshot, halos, hubble=None, outfile=None):\n s = snapshot\n grpoutfile = s.filename + \".amiga.grp\"\n statoutfile = s.filename + \".amiga.stat\"\n tipsyoutfile = s.filename + \".amiga.gtp\"\n halos.writegrp(s, halos, grpoutfile)\n halos.writestat(s, halos, statoutfile, hubble=hubble)\n shalos = halos.writetipsy(s, halos, tipsyoutfile, hubble=hubble)\n return shalos",
"def writetipsy(self, snapshot, halos, tipsyoutfile, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = snapshot\n outfile = tipsyoutfile\n nhalos = halos._nhalos\n nstar = nhalos\n sout = new(star=nstar) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None:\n hubble = s.properties['h']\n sout.properties['h'] = hubble\n # ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc * sout.properties['h']\n lboxmpch = lboxkpc * sout.properties['h'] / 1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8. / 3.) ** .5\n tipsymunitmsun = rhocrithhco * lboxmpch ** 3 / sout.properties['h']\n\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties\n sout.star[ii]['mass'] = h['mass'] / hubble / tipsymunitmsun\n # tipsy units: box centered at 0. (assume 0<=x<=1)\n sout.star[ii]['x'] = h['Xc'] / lboxkpch - 0.5\n sout.star[ii]['y'] = h['Yc'] / lboxkpch - 0.5\n sout.star[ii]['z'] = h['Zc'] / lboxkpch - 0.5\n sout.star[ii]['vx'] = h['VXc'] / tipsyvunitkms\n sout.star[ii]['vy'] = h['VYc'] / tipsyvunitkms\n sout.star[ii]['vz'] = h['VZc'] / tipsyvunitkms\n sout.star[ii]['eps'] = h['Rvir'] / lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout",
"def writestat(self, snapshot, halos, statoutfile, hubble=None):\n s = snapshot\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n outfile = statoutfile\n logger.info(\"Writing stat file to %s\" % statoutfile)\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n nhalos = halos._nhalos\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties # halo index starts with 1 not 0\n # 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(halos[ii + 1].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n # may want to add implement satellite test and false central\n # breakup test.\n\n n_dark = h['npart'] - h['n_gas'] - h['n_star']\n M_dark = h['mass'] - h['M_gas'] - h['M_star']\n ss = \" \" # can adjust column spacing\n outstring = str(int(h['halo_id'])) + ss\n outstring += str(int(h['npart'])) + ss + str(int(h['n_gas'])) + ss\n outstring += str(int(h['n_star'])) + ss + str(int(n_dark)) + ss\n outstring += str(h['mass'] / hubble) + ss + \\\n str(h['Rvir'] / hubble) + ss\n outstring += str(h['M_gas'] / hubble) + ss + \\\n str(h['M_star'] / hubble) + ss\n outstring += str(M_dark / hubble) + ss\n outstring += str(h['Vmax']) + ss + str(h['Rmax'] / hubble) + ss\n outstring += str(h['sigV']) + ss\n # pos: convert kpc/h to mpc (no h).\n outstring += str(h['Xc'] / hubble / 1000.) + ss\n outstring += str(h['Yc'] / hubble / 1000.) + ss\n outstring += str(h['Zc'] / hubble / 1000.) + ss\n outstring += str(h['VXc']) + ss + \\\n str(h['VYc']) + ss + str(h['VZc']) + ss\n outstring += contam + ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\" + ss # false central breakup.\n print >> fpout, outstring\n fpout.close()\n return 1",
"def writestat(self, outfile=None, hubble=None):\n s = self._base()\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n if outfile is None: outfile = self._base().filename+'.stat'\n print \"write stat file to \", outfile\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n for ii in np.arange(self._nhalos)+1:\n print '%d '%ii,\n sys.stdout.flush()\n h = self[ii].properties # halo index starts with 1 not 0\n## 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(self[ii].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n## may want to add implement satellite test and false central breakup test.\n ss = \" \" # can adjust column spacing\n outstring = str(ii)+ss\n outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss\n outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss\n outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss\n outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss\n outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss\n outstring += str(h['vrms'])+ss\n ## pos: convert kpc/h to mpc (no h).\n outstring += str(h['pos'][0][0]/hubble)+ss\n outstring += str(h['pos'][0][1]/hubble)+ss\n outstring += str(h['pos'][0][2]/hubble)+ss\n outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss\n outstring += str(h['vel'][0][2])+ss\n outstring += contam+ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\"+ss # false central breakup.\n print >> fpout, outstring\n fpout.close()",
"def _make_haloupdate(self, f, fixed, halos, **kwargs):\n return",
"def write_hypnogram(hypno, filename, seconds_per_annotation=30, \n comment=None, overwrite=False):\n assert not ospath.exists(filename) or overwrite, \\\n 'File already exists, no overwrite'\n hypno = np.repeat(hypno, seconds_per_annotation)\n hypno_str = hypno2time(hypno)\n if comment is not None:\n comment = comment.replace('\\n', '\\n*')\n hypno_str = '*' + comment + '\\n' + hypno_str\n hypno_str = hypno_str.replace('\\n\\n', '\\n')\n with open(filename, 'w') as f:\n f.write(hypno_str) \n return True",
"def HI_mass(mhalo,aa):\n zp1 = 1.0/aa\n zz = zp1-1\n # Set the parameters of the HOD, using the \"simple\" form.\n # MHI ~ M0 x^alpha Exp[-1/x] x=Mh/Mmin\n # from the Appendix of https://arxiv.org/pdf/1804.09180.pdf, Table 6.\n # Fits valid for 1<z<6:\n mcut= 1e10*(6.11-1.99*zp1+0.165*zp1**2)\n alp = (1+2*zz)/(2+2*zz)\n # Work out the HI mass/weight per halo -- ignore prefactor.\n xx = mhalo/mcut+1e-10\n mHI = xx**alp * np.exp(-1/xx)\n # Scale to some standard number in the right ball-park.\n mHI*= 2e9*np.exp(-1.9*zp1+0.07*zp1**2)\n # Return the HI masses.\n return(mHI)\n #",
"def khan_tophat_morphology(img,mask = None,debugOption = 'off'):\n \n #Previous version.\n #selm = morphology.disk(8)\n #closedImg = morphology.closing(tempTophat1, selm)\n #closeOpenImg = morphology.closing(closedImg, selm)\n strel1 = np.array([[0,0,1,1,1,1,1,1,1,1,1,0,0],\n [0,1,1,1,1,1,1,1,1,1,1,1,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [0,1,1,1,1,1,1,1,1,1,1,1,0],\n [0,0,1,1,1,1,1,1,1,1,1,0,0]],dtype='uint8')\n\n strel2 = np.array([[0,0,1,1,1,1,1,0,0],\n [0,1,1,1,1,1,1,1,0],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,1,1],\n [0,1,1,1,1,1,1,1,0],\n [0,0,1,1,1,1,1,0,0]],dtype='uint8')\n \n tempTophat1 = img\n closedImg = cv2.morphologyEx(tempTophat1,cv2.MORPH_CLOSE,strel1)\n closeOpenImg = cv2.morphologyEx(closedImg,cv2.MORPH_CLOSE,strel1)\n temp1 = closeOpenImg < 0\n topHatMdfd= np.subtract(closeOpenImg,self.claheImg)\n\n temp2 = topHatMdfd <0\n \n if mask != None:\n tempMaskImg = mask\n tempRow,tempCol = tempMaskImg.shape\n\n for row in range(tempRow):\n for col in range(tempCol):\n if tempMaskImg[row,col] > 0:\n tempMaskImg[row,col] = 1\n else:\n tempMaskImg[row,col] = 0\n\n tempMaskImg = tempMaskImg.astype('uint8')\n maskImg = morphology.erosion(tempMaskImg,self.strucEle2) \n result = np.multiply(topHatMdfd,maskImg)\n \n\n else:\n result = topHatMdfd\n \n\n #Debugging\n if defaultoptions == 'on'\n print(\"tempTophat1 : {} closeOpenImg : {} topHatMdfd : {} \".format(tempTophat1.dtype,closeOpenImg.dtype,topHatMdfd.dtype))\n print(\"result : {} \".format(result.dtype))\n print(\"tempMaskImg : {} maskStruc : {} maskImg : {} \".format(tempMaskImg.dtype,maskStruc.dtype,maskImg.dtype))\n \n plt.axis(\"off\")\n plt.title('tophat')\n plt.imshow(result,cmap='gray')\n plt.show()\n\n return result",
"def export_heads(heads_file, grid, hdry, hnflo,\n kstpkper=(0, 0), levels=None, interval=None,\n export_water_table=True, export_depth_to_water=False,\n export_layers=False, land_surface_elevations=None,\n output_path='postproc', suffix=''):\n if np.isscalar(kstpkper[0]):\n kstpkper = [kstpkper]\n print('Exporting heads...')\n print('file: {}'.format(heads_file))\n\n pdfs_dir, rasters_dir, shps_dir = make_output_folders(output_path)\n\n outfiles = []\n for kstp, kper in kstpkper:\n print('stress period {}, timestep {}'.format(kper, kstp))\n # Heads output\n hdsobj = bf.HeadFile(heads_file)\n hds = hdsobj.get_data(kstpkper=(kstp, kper))\n \n if export_water_table or export_depth_to_water:\n wt = get_water_table(hds, nodata=hdry)\n wt[(wt > 9999) | (wt < 0)] = np.nan\n outfile = '{}/wt_per{}_stp{}{}.tif'.format(rasters_dir, kper, kstp, suffix)\n ctr_outfile = '{}/wt_ctr_per{}_stp{}{}.shp'.format(shps_dir, kper, kstp, suffix)\n export_array(outfile, wt, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, wt, grid, levels=levels, interval=interval)\n outfiles += [outfile, ctr_outfile]\n \n if export_depth_to_water:\n if land_surface_elevations is None:\n raise ValueError(('export_heads: export_depth_to_water option '\n 'requires specification of the land surface'))\n if not isinstance(land_surface_elevations, np.ndarray):\n land_surface_elevations = np.loadtxt(land_surface_elevations)\n \n # Depth to water\n dtw = land_surface_elevations - wt \n\n # Overpressurization\n op = dtw.copy()\n # For DTW, mask areas of overpressurization;\n # For Overpressurization, mask areas where water table is below land surface\n op = np.ma.masked_array(op, mask=op > 0)\n dtw = np.ma.masked_array(dtw, mask=dtw < 0)\n \n if np.max(dtw) > 0:\n #dtw_levels = None\n #if interval is not None:\n # dtw_levels = np.linspace(0, np.nanmax(dtw), interval)\n outfile = '{}/dtw_per{}_stp{}{}.tif'.format(rasters_dir, kper, kstp, suffix)\n ctr_outfile = '{}/dtw_ctr_per{}_stp{}{}.shp'.format(shps_dir, kper, kstp, suffix)\n export_array(outfile, dtw, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, dtw, grid, interval=interval)\n outfiles += [outfile, ctr_outfile]\n else:\n print('Water table is above land surface everywhere, skipping depth to water.')\n \n if np.nanmin(op) < 0:\n #op_levels = None\n #if interval is not None:\n # op_levels = np.linspace(0, np.nanmin(op), interval)\n outfile = '{}/op_per{}_stp{}{}.tif'.format(rasters_dir, kper, kstp, suffix)\n ctr_outfile = '{}/op_ctr_per{}_stp{}{}.shp'.format(shps_dir, kper, kstp, suffix)\n export_array(outfile, op, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, op, grid, interval=interval)\n outfiles += [outfile, ctr_outfile]\n else:\n print('No overpressurization, skipping.')\n \n\n hds[(hds > 9999) | (hds < 0)] = np.nan\n\n if export_layers:\n for k, h in enumerate(hds):\n outfile = '{}/hds_lay{}_per{}_stp{}{}.tif'.format(rasters_dir, k, kper, kstp, suffix)\n ctr_outfile = '{}/hds_ctr_lay{}_per{}_stp{}{}.shp'.format(shps_dir, k, kper, kstp, suffix)\n export_array(outfile, h, grid, nodata=hnflo)\n export_array_contours(ctr_outfile, h, grid, levels=levels, interval=interval,\n )\n outfiles += [outfile, ctr_outfile]\n return outfiles",
"def aluminum_hexathiohypodiphosphate():\n\n positions = [[0.000000, 0.000000, 0.000000],\n [0.500000, 0.000000, 0.500000],\n [0.000000, 0.500000, 0.000000],\n [0.000000, 0.000000, 0.500000],\n [0.197847, 0.276435, 0.101916],\n [0.197847, 0.723565, 0.898084],\n [0.802153, 0.276435, 0.898084],\n [0.802153, 0.723565, 0.101916],\n [0.776404, 0.800507, 0.601208],\n [0.776404, 0.199493, 0.398792],\n [0.223596, 0.800507, 0.398792],\n [0.223596, 0.199493, 0.601208]]\n\n species = ['Al','Al','P','P','S','S','S','S','S','S','S','S']\n\n bravais = 'orthorhombic'\n\n space_group = 16\n lattice_parameters = {'a': Set(5.71230345, 'angstrom'),\n 'b': Set(5.71644625, 'angstrom'),\n 'c': Set(11.46678755,'angstrom')}\n data = {'fractional': positions,\n 'species': species,\n 'lattice_parameters': lattice_parameters,\n 'space_group': ('', space_group),\n 'n_atoms': len(species)}\n\n return data",
"def create_pythia_cmnd_files(self):\n \n for higgsname, higgspid in {'H': 35, 'A': 36}.iteritems():\n \n # Get mass and width from 2HDMC LHA file\n lha = LHA(self.lhafile)\n mass = lha.get_block('MASS').get_entry_by_key(higgspid)\n width = lha.get_decay(higgspid).width \n \n outname = self.lhafile.replace('.lha', '_%s.cmnd' % higgsname)\n self.cmndfiles[higgsname] = outname\n \n # Write command file\n with open(outname, 'w') as outfile:\n \n outfile.write('Beams:eCM = 13000.\\n')\n outfile.write('Higgs:useBSM = on\\n')\n \n if higgspid == 36:\n #outfile.write('HiggsBSM:allA3 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2A3 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2A3 = on\\n') # gluon fusion\n elif higgspid == 35:\n #outfile.write('HiggsBSM:allH2 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2H2 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2H2 = on\\n') # gluon fusion\n \n outfile.write('{}:all = A0 A0 1 0 0 {} {} 50.0 0.0\\n'.format(higgspid, mass, width))\n outfile.write('{}:onMode = off\\n'.format(higgspid))\n outfile.write('{}:onIfMatch = 15 -15\\n'.format(higgspid))\n \n outfile.write('15:onMode = off\\n')\n outfile.write('15:onIfMatch = 16 111 211\\n')\n outfile.write('\\n')\n outfile.write('Next:numberShowEvent = 0\\n')\n\n return 0",
"def ROCKSTAR_binary():\n header_size = 256 #Bytes, size of the header\n halo_struct_size = 264 #Bytes, properties stored for one halo using dtype structure dt (260 from struct 'halo' in halo.h from ROCKSTAR and \n #4 bytes probably from max_metric from struct 'extra_halo_info' in halo.h)\n bytes_to_header_info = 64 #bytes until the header info starts\n \n dt_header_info = [ \n ('n_halos' , np.int64), #total number of halos in this file\n ('tot_n_particles' , np.int64), #total number of particles in this file \n ('box_size' , np.float32), #side lenght in Mpc/h of simulation box\n ('m_particles' , np.float32), #mass of one particle in h-1Msun\n ('type_particles' , np.int64) #type of particle (either 1=halo, star, gas etc.) \n ]\n \n dt = [\n ('haloid' , np.int64), #int64_t id\n ('x_pos' , np.float32), #float pos[6], 1\n ('y_pos' , np.float32), #float pos[6], 2\n ('z_pos' , np.float32), #float pos[6], 3\n ('pos4' , np.float32), #float pos[6], 4\n ('pos5' , np.float32), #float pos[6], 5\n ('pos6' , np.float32), #float pos[6], 6 \n ('x_corevel' , np.float32), #float corevel[3], 1\n ('y_corevel' , np.float32), #float corevel[3], 2\n ('z_corevel' , np.float32), #float corevel[3], 3 \n ('x_vel_bulk' , np.float32), #float bulkvel[3], 1\n ('y_vel_bulk' , np.float32), #float bulkvel[3], 2\n ('z_vel_bulk' , np.float32), #float bulkvel[3], 3\n ('mhalo' , np.float32), #float m \n ('rvir' , np.float32), #float r \n ('rvir_child' , np.float32), #float child_r\n ('vmax_r' , np.float32), #float vmax_r\n ('mhalo_bound' , np.float32), #float mgrav\n ('vmax' , np.float32), #float vmax\n ('vpeak' , np.float32), #float rvmax\n ('rscale' , np.float32), #float rs\n ('rscale_Klypin' , np.float32), #float klypin_rs\n ('vrms' , np.float32), #float vrms\n ('x_ang' , np.float32), #float J[3], 1\n ('y_ang' , np.float32), #float J[3], 2\n ('z_ang' , np.float32), #float J[3], 3\n ('energy' , np.float32), #float energy \n ('spinParameter' , np.float32), #float spin\n ('mhalo_200b' , np.float32), #float alt_m[4], 1 \n ('mhalo_200c' , np.float32), #float alt_m[4], 2 \n ('mhalo_500c' , np.float32), #float alt_m[4], 3 \n ('mhalo_2500c' , np.float32), #float alt_m[4], 4 \n ('x_off' , np.float32), #float Xoff\n ('v_off' , np.float32), #float Voff\n ('b_to_a' , np.float32), #float b_to_a \n ('c_to_a' , np.float32), #float c_to_a\n ('x_a' , np.float32), #float A[3], 1\n ('y_a' , np.float32), #float A[3], 2\n ('z_a' , np.float32), #float A[3], 3 \n ('b_to_a_500c' , np.float32), #float b_to_a2\n ('c_to_a_500c' , np.float32), #float c_to_a2\n ('x_a_500c' , np.float32), #float A2[3], 1 \n ('y_a_500c' , np.float32), #float A2[3], 2\n ('z_a_500c' , np.float32), #float A2[3], 3 \n ('spin_Bullock' , np.float32), #float bullock_spin\n ('T_U' , np.float32), #float kin_to_pot\n ('Mpseudo_Behroozi', np.float32), #float m_pe_b \n ('Mpseudo_Diemer' , np.float32), #float m_pe_d\n ('rhalf_mass' , np.float32), #float halfmass_radius\n ('n_particles' , np.int64), #int64_t num_p\n ('n_particles_child', np.int64), #int64_t num_child_particles \n ('p_start' , np.int64), #int64_t p_start\n ('descIndex' , np.int64), #int64_t desc\n ('flags' , np.int64), #int64_t flags\n ('n_core' , np.int64), #int64_t n_core\n ('PosUncertainty' , np.float32), #float min_pos_err\n ('VelUncertainty' , np.float32), #float min_vel_err\n ('BulkVelUnc' , np.float32), #float min_bulkvel_err\n ('mmetric' , np.float32) #unclear where it comes from, it might be mmetric \n ]\n \n return header_size, halo_struct_size, dt, dt_header_info, bytes_to_header_info",
"def preprocess_phys_hypnograms(dataset_folder_path):\n import numpy as np\n from wfdb.io import rdann\n from utime.io.high_level_file_loaders import load_psg\n from utime.bin.extract_hypno import to_ids\n from utime.hypnogram import SparseHypnogram\n from utime import Defaults\n\n # Get list of subject folders\n subject_folders = glob(os.path.join(dataset_folder_path, \"tr*\"))\n LABEL_MAP = {\n 'N1': \"N1\",\n 'N2': \"N2\",\n 'N3': \"N3\",\n 'R': \"REM\",\n 'W': \"W\",\n }\n\n for i, folder in enumerate(subject_folders):\n name = os.path.split(os.path.abspath(folder))[-1]\n print(f\"{i+1}/{len(subject_folders)}\", name)\n\n # Get sleep-stages\n edf_file = folder + f\"/{name}.mat\"\n org_hyp_file = folder + f\"/{name}.arousal\"\n new_hyp_file = folder + f\"/{name}.arousal.st\"\n out_path = new_hyp_file.replace(\".arousal.st\", \"-HYP.ids\")\n if os.path.exists(out_path):\n print(\"Exists, skipping...\")\n continue\n if os.path.exists(org_hyp_file):\n os.rename(org_hyp_file, new_hyp_file)\n\n psg, header = load_psg(edf_file, load_channels=['C3-M2'])\n hyp = rdann(new_hyp_file[:-3], \"st\")\n\n sample_rate = header[\"sample_rate\"]\n psg_length_sec = len(psg)/sample_rate\n\n pairs = zip(hyp.aux_note, hyp.sample)\n stages = [s for s in pairs if not (\"(\" in s[0] or \")\" in s[0])]\n stages = [(s[0], int(s[1]/sample_rate)) for s in stages]\n stages, starts = map(list, zip(*stages))\n\n if starts[0] != 0:\n i = [0] + starts\n s = [\"UNKNOWN\"] + [LABEL_MAP[s] for s in stages]\n else:\n i, s = starts, stages\n diff = psg_length_sec - i[-1]\n assert diff >= 0\n d = list(np.diff(i)) + [(diff//30) * 30]\n SparseHypnogram(i, d, [Defaults.get_stage_string_to_class_int()[s_] for s_ in s], 30)\n to_ids(i, d, s, out_path)",
"def holography(params, mode='same', debug=False):\r\n\r\n logger.info(f\"Starting holographic reconstruction...\")\r\n file_archive = FileArchive(file_list=params['PATHS']['inDir'], cards=[], dtypes=[])\r\n in_files = file_archive.files\r\n in_dir = file_archive.in_dir\r\n tmp_dir = params['PATHS']['tmpDir']\r\n\r\n # Input check\r\n if mode not in ['same', 'full', 'valid']:\r\n raise SpecklepyValueError('holography()', argname='mode', argvalue=mode,\r\n expected=\"either 'same', 'full', or 'valid'\")\r\n\r\n if 'apodizationType' in params['APODIZATION']:\r\n # Catch deprecated parameter name\r\n logger.warning(\"Parameter 'apodizationType' is deprecated. Use 'type' instead!\")\r\n params['APODIZATION']['type'] = params['APODIZATION']['apodizationType']\r\n if 'apodizationWidth' in params['APODIZATION']:\r\n # Catch deprecated parameter name\r\n logger.warning(\"Parameter 'apodizationWidth' is deprecated. Use 'radius' instead!\")\r\n params['APODIZATION']['radius'] = params['APODIZATION']['apodizationWidth']\r\n if params['APODIZATION']['type'] is None or params['APODIZATION']['type'].lower() not in ['gaussian', 'airy']:\r\n logger.error(f\"Apodization type has not been set or of wrong type ({params['APODIZATION']['type']})\")\r\n if params['APODIZATION']['radius'] is None or not isinstance(params['APODIZATION']['radius'], (int, float)):\r\n logger.error(f\"Apodization radius has not been set or of wrong type ({params['APODIZATION']['radius']})\")\r\n\r\n # Initialize the outfile\r\n out_file = ReconstructionFile(filename=params['PATHS']['outFile'], files=in_files,\r\n cards={\"RECONSTRUCTION\": \"Holography\"}, in_dir=in_dir)\r\n\r\n # Initialize reconstruction\r\n reconstruction = Reconstruction(in_files=in_files, mode=mode, alignment_method='ssa',\r\n reference_image=params['PATHS']['alignmentReferenceFile'],\r\n in_dir=in_dir, tmp_dir=tmp_dir, out_file=params['PATHS']['outFile'],\r\n var_ext=params['OPTIONS']['varianceExtensionName'],\r\n box_indexes=params['OPTIONS']['box_indexes'], debug=debug)\r\n\r\n # (i-ii) Align cubes\r\n # shifts = get_shifts(files=in_files, reference_file=params['PATHS']['alignmentReferenceFile'],\r\n # lazy_mode=True, return_image_shape=False, in_dir=in_dir, debug=debug)\r\n shifts = reconstruction.shifts\r\n\r\n # (iii) Compute SSA reconstruction\r\n # image = ssa(in_files, mode=mode, outfile=out_file, in_dir=in_dir, tmp_dir=tmp_dir,\r\n # variance_extension_name=params['OPTIONS']['varianceExtensionName'])\r\n image = reconstruction.coadd_long_exposures()\r\n if isinstance(image, tuple):\r\n # SSA returned a reconstruction image and a variance image\r\n image, image_var = image\r\n total_flux = np.sum(image) # Stored for flux conservation\r\n\r\n # Start iteration from steps (iv) through (xi)\r\n while True:\r\n # (iv) Astrometry and photometry, i.e. 
StarFinder\r\n extract_sources(image=image,\r\n fwhm=params['STARFINDER']['starfinderFwhm'],\r\n noise_threshold=params['STARFINDER']['noiseThreshold'],\r\n background_subtraction=True,\r\n write_to=params['PATHS']['allStarsFile'],\r\n star_finder='DAO', debug=debug)\r\n\r\n # (v) Select reference stars\r\n print(\"\\tPlease copy your desired reference stars from the all stars file into the reference star file!\")\r\n input(\"\\tWhen you are done, hit a ENTER.\")\r\n\r\n # (vi) PSF extraction\r\n ref_stars = ReferenceStars(psf_radius=params['PSFEXTRACTION']['psfRadius'],\r\n reference_source_file=params['PATHS']['refSourceFile'], in_files=in_files,\r\n save_dir=tmp_dir, in_dir=in_dir,\r\n field_segmentation=params['PSFEXTRACTION']['fieldSegmentation'])\r\n if params['PSFEXTRACTION']['mode'].lower() == 'epsf':\r\n psf_files = ref_stars.extract_epsfs(file_shifts=shifts, debug=debug)\r\n elif params['PSFEXTRACTION']['mode'].lower() in ['mean', 'median', 'weighted_mean']:\r\n psf_files = ref_stars.extract_psfs(file_shifts=shifts, mode=params['PSFEXTRACTION']['mode'].lower(),\r\n debug=debug)\r\n else:\r\n raise RuntimeError(f\"PSF extraction mode '{params['PSFEXTRACTION']['mode']}' is not understood!\")\r\n logger.info(\"Saved the extracted PSFs...\")\r\n\r\n # (vii) Noise thresholding\r\n psf_noise_mask = None\r\n for file in psf_files:\r\n with fits.open(file, mode='update') as hdu_list:\r\n n_frames = hdu_list[0].header['NAXIS3']\r\n if psf_noise_mask is None:\r\n psf_noise_mask = get_noise_mask(hdu_list[0].data[0],\r\n noise_reference_margin=\r\n params['PSFEXTRACTION']['noiseReferenceMargin'])\r\n for index in range(n_frames):\r\n reference = np.ma.masked_array(hdu_list[0].data[index], mask=psf_noise_mask)\r\n background = np.mean(reference)\r\n noise = np.std(reference)\r\n update = np.maximum(hdu_list[0].data[index] - background -\r\n params['PSFEXTRACTION']['noiseThreshold'] * noise, 0.0)\r\n if np.sum(update) == 0.0:\r\n raise ValueError(\"After background subtraction and noise thresholding, no signal is leftover. \"\r\n \"Please reduce the noiseThreshold!\")\r\n update = update / np.sum(update) # Flux sum of order unity\r\n hdu_list[0].data[index] = update\r\n hdu_list.flush()\r\n\r\n # (viii) Subtraction of secondary sources within the reference apertures\r\n # TODO: Implement Secondary source subtraction\r\n pass\r\n\r\n # (ix) Estimate object, following Eq. 1 (Schoedel et al., 2013)\r\n f_object = FourierObject(in_files, psf_files, shifts=shifts, mode=mode, in_dir=in_dir)\r\n f_object.coadd_fft()\r\n\r\n # (x) Apodization\r\n f_object.apodize(type=params['APODIZATION']['type'], radius=params['APODIZATION']['radius'])\r\n\r\n # (xi) Inverse Fourier transform to retain the reconstructed image\r\n image = f_object.ifft(total_flux=total_flux)\r\n\r\n # Inspect the latest reconstruction\r\n if debug:\r\n imshow(image)\r\n\r\n # Save the latest reconstruction image to outfile\r\n out_file.data = image\r\n\r\n # Ask the user whether the iteration shall be continued or not\r\n answer = input(\"\\tDo you want to continue with one more iteration? [yes/no]\\n\\t\")\r\n if answer.lower() in ['n', 'no']:\r\n break\r\n\r\n # Repeat astrometry and photometry, i.e. StarFinder on final image\r\n extract_sources(image=image, fwhm=params['STARFINDER']['starfinderFwhm'],\r\n noise_threshold=params['STARFINDER']['noiseThreshold'], background_subtraction=True,\r\n write_to=params['PATHS']['allStarsFile'], star_finder='DAO', debug=debug)\r\n\r\n # Finally return the image\r\n return image",
"def test_tophat():\n savedImg = galsim.fits.read(os.path.join(imgdir, \"tophat_101.fits\"))\n myImg = galsim.ImageF(savedImg.bounds, scale=0.2)\n myImg.setCenter(0,0)\n test_flux = 1.8\n\n # There are numerical issues with using radius = 1, since many points are right on the edge\n # of the circle. e.g. (+-1,0), (0,+-1), (+-0.6,+-0.8), (+-0.8,+-0.6). And in practice, some\n # of these end up getting drawn and not others, which means it's not a good choice for a unit\n # test since it wouldn't be any less correct for a different subset of these points to be\n # drawn. Using r = 1.01 solves this problem and makes the result symmetric.\n tophat = galsim.TopHat(radius=1.01, flux=1)\n tophat.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject TopHat disagrees with expected result\")\n np.testing.assert_array_equal(\n tophat.radius, 1.01,\n err_msg=\"TopHat radius returned wrong value\")\n\n # Check with default_params\n tophat = galsim.TopHat(radius=1.01, flux=1, gsparams=default_params)\n tophat.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject TopHat with default_params disagrees with expected result\")\n tophat = galsim.TopHat(radius=1.01, flux=1, gsparams=galsim.GSParams())\n tophat.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject TopHat with GSParams() disagrees with expected result\")\n\n # Use non-unity values.\n tophat = galsim.TopHat(flux=1.7, radius=2.3)\n gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n tophat2 = galsim.TopHat(flux=1.7, radius=2.3, gsparams=gsp)\n assert tophat2 != tophat\n assert tophat2 == tophat.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n\n # Test photon shooting.\n do_shoot(tophat,myImg,\"TopHat\")\n\n # Test shoot and kvalue\n scale = 0.2939\n im = galsim.ImageF(16,16, scale=scale)\n # The choices of radius here are fairly specific. 
If the edge of the circle comes too close\n # to the center of one of the pixels, then the test will fail, since the Fourier draw method\n # will blur the edge a bit and give some flux to that pixel.\n for radius in [ 1.2, 0.93, 2.11 ]:\n tophat = galsim.TopHat(radius=radius, flux=test_flux)\n check_basic(tophat, \"TopHat with radius = %f\"%radius)\n do_shoot(tophat,im,\"TopHat with radius = %f\"%radius)\n do_kvalue(tophat,im,\"TopHat with radius = %f\"%radius)\n\n # This is also a profile that may be convolved using real space convolution, so test that.\n conv = galsim.Convolve(tophat, galsim.Pixel(scale=scale), real_space=True)\n check_basic(conv, \"TopHat convolved with pixel in real space\",\n approx_maxsb=True, scale=0.2)\n do_kvalue(conv,im, \"TopHat convolved with pixel in real space\")\n\n cen = galsim.PositionD(0, 0)\n np.testing.assert_equal(tophat.centroid, cen)\n np.testing.assert_almost_equal(tophat.kValue(cen), (1+0j) * test_flux)\n np.testing.assert_almost_equal(tophat.flux, test_flux)\n np.testing.assert_almost_equal(tophat.xValue(cen), tophat.max_sb)\n np.testing.assert_almost_equal(tophat.xValue(radius-0.001, 0.), tophat.max_sb)\n np.testing.assert_almost_equal(tophat.xValue(0., radius-0.001), tophat.max_sb)\n np.testing.assert_almost_equal(tophat.xValue(radius+0.001, 0.), 0.)\n np.testing.assert_almost_equal(tophat.xValue(0., radius+0.001), 0.)\n\n # Check picklability\n do_pickle(tophat, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(tophat)\n do_pickle(galsim.TopHat(1))\n\n # Check sheared tophat the same way\n tophat = galsim.TopHat(radius=1.2, flux=test_flux)\n # Again, the test is very sensitive to the choice of shear here. Most values fail because\n # some pixel center gets too close to the resulting ellipse for the fourier draw to match\n # the real-space draw at the required accuracy.\n tophat = tophat.shear(galsim.Shear(g1=0.15, g2=-0.33))\n check_basic(tophat, \"Sheared TopHat\")\n do_shoot(tophat,im, \"Sheared TopHat\")\n do_kvalue(tophat,im, \"Sheared TopHat\")\n cen = galsim.PositionD(0, 0)\n np.testing.assert_equal(tophat.centroid, cen)\n np.testing.assert_almost_equal(tophat.kValue(cen), (1+0j) * test_flux)\n np.testing.assert_almost_equal(tophat.flux, test_flux)\n np.testing.assert_almost_equal(tophat.xValue(cen), tophat.max_sb)\n\n # Check picklability\n do_pickle(tophat, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(tophat)\n\n # Check real-space convolution of the sheared tophat.\n conv = galsim.Convolve(tophat, galsim.Pixel(scale=scale), real_space=True)\n check_basic(conv, \"Sheared TopHat convolved with pixel in real space\",\n approx_maxsb=True, scale=0.2)\n do_kvalue(conv,im, \"Sheared TopHat convolved with pixel in real space\")",
"def hopping(h,name=\"HOPPING.OUT\",reps=0):\n if h.has_eh: raise\n if h.has_spin: (ii,jj,ts) = extract.hopping_spinful(h.intra)\n else: (ii,jj,ts) = extract.hopping_spinless(h.intra)\n f = open(name,\"w\") # write file\n for (i,j,t) in zip(ii,jj,ts):\n f.write(str(h.geometry.r[i][0])+\" \")\n f.write(str(h.geometry.r[i][1])+\" \")\n f.write(str(h.geometry.r[j][0])+\" \")\n f.write(str(h.geometry.r[j][1])+\" \")\n f.write(str(t)+\"\\n\")\n f.close()",
"def OneModeCoherentHO(Ns,t,nth,shots):\n s1 = np.zeros(shots)\n\n alpha = np.sqrt(Ns/4)\n \n for i in range(shots):\n prog= sf.Program(1)\n \n with prog.context as q:\n \n sf.ops.Coherent(alpha) | q[0] # State preparation\n sf.ops.ThermalLossChannel(t,nth) | q[0] # Thermal loss channel mimicing target\n \n sf.ops.MeasureX | q[0] # Het. Msmnt of signal 1\n\n\n # Need to run twice because of bug in the bosonic backend in dealing with repeated HD measurements\n \n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n \n #Collecting the samples\n samples = results.all_samples\n \n #Creating the measurement records\n s1[i] = samples[0][0]\n \n # Interation over number of shots is done, outputing the records\n \n return s1",
"def _exchange_halos(\n self,\n f,\n bc_f,\n replica_id,\n replicas,\n ):\n return halo_exchange.inplace_halo_exchange(\n f,\n self._halo_dims,\n replica_id,\n replicas,\n self._replica_dims,\n self._params.periodic_dims,\n bc_f,\n width=self._params.halo_width)",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def make(self, sample=10, scale=1, percentage=0, filename_addition='_halftoned', angles=[0,15,30,45], style='color', antialias=False):\n f, e = os.path.splitext(self.path)\n\n outfile = \"%s%s%s\" % (f, filename_addition, e)\n\n try:\n im = Image.open(self.path)\n except IOError:\n raise\n\n if style == 'grayscale':\n angles = angles[:1]\n gray_im = im.convert('L')\n dots = self.halftone(im, gray_im, sample, scale, angles, antialias)\n new = dots[0]\n\n else:\n cmyk = self.gcr(im, percentage)\n dots = self.halftone(im, cmyk, sample, scale, angles, antialias)\n new = Image.merge('CMYK', dots)\n\n new.save(outfile)",
"def _write_psf_cutouts_hst(self):\n\n print('writing psf cutouts')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n nfile=self.image_info.size\n nobj=obj_data.size\n\n cutout_hdu = self.fits['psf']\n\n for iobj in range(nobj):\n if (iobj+1) % 100 == 0:\n print(' %d/%d' % (iobj+1,obj_data.size))\n\n # HST psf is same for every cutout, in fact ncut should always\n # be 1\n try:\n psf_im = self.psf_data.get_psf(iobj)\n except AttributeError:\n psf_im = None\n\n ncut=obj_data['ncutout'][iobj]\n\n for icut in range(ncut):\n\n if psf_im is None:\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = self.psf_data[file_id]\n\n psf_im = p.get_rec(row,col)\n\n expected_psf_shape = (\n obj_data['psf_row_size'][iobj,icut],\n obj_data['psf_col_size'][iobj,icut],\n )\n\n file_id = obj_data['file_id'][iobj, icut]\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n start_row = obj_data['psf_start_row'][iobj, icut]\n\n if psf_im.shape != expected_psf_shape:\n raise ValueError(\"psf size mismatch, expected %s \"\n \"got %s\" % (expected_psf_shape, psf_im.shape))\n\n cutout_hdu.write(psf_im, start=start_row)",
"def run_haplotyper(reference, vcffile, sortedbam, filterthreshold):\n\tfragoutfile = reference+\".fragout\"\n\textractHAIRScmd=scriptdir+\"extractHAIRS --VCF \"+vcffile+\" --bam \"+sortedbam+\" --indels 1 --ref \"+reference+\" > \"+fragoutfile\n\tsubprocess.call(extractHAIRScmd, shell=True)\n\tfilteredfragfile = reference+\".filtered.fragout\"\n\tfilterthreshold = str(filterthreshold)\n\tfiltercmd = \"awk '{ if (length($NF) > \"+filterthreshold+\" ) print;}' \"+fragoutfile+\" > \"+filteredfragfile\n\tsubprocess.call(filtercmd, shell=True)\n\thapoutfile = reference+\".hapout\"\n\thapcutcmd = scriptdir+\"HAPCUT --fragments \"+filteredfragfile+\" --VCF \"+vcffile+\" --output \"+hapoutfile+\" --maxiter 11\"\n\tsubprocess.call(hapcutcmd, shell=True)\n\t\n\treturn hapoutfile",
"def export_hamilton(args):\n if args.type == 'filling_out':\n clarity_epp.export.hamilton.samplesheet_filling_out(lims, args.process_id, args.output_file)\n elif args.type == 'purify':\n clarity_epp.export.hamilton.samplesheet_purify(lims, args.process_id, args.output_file)",
"def TwoModeSqueezedHD(Ns,t,nth,shots):\n \n s1 = (1+1j)*np.zeros(shots)\n s2 = (1+1j)*np.zeros(shots)\n \n r = np.arcsinh(np.sqrt(Ns/2))\n \n for i in range(shots):\n prog= sf.Program(2)\n \n with prog.context as q:\n \n sf.ops.S2gate(r,0) | (q[0],q[1]) # State preparation\n sf.ops.ThermalLossChannel(t,nth) | q[0] # Thermal loss channel mimicing target\n \n sf.ops.MeasureHD | q[0] # Het. Msmnt of signal 1\n sf.ops.MeasureHD | q[1] # Het. Msmnt of signal 2\n\n # Need to run twice because of bug in the bosonic backend in dealing with repeated HD measurements\n \n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n \n #Collecting the samples\n samples = results.all_samples\n \n #Creating the measurement records\n s1[i] = samples[0][0]\n s2[i] = samples[1][0]\n \n # Interation over number of shots is done, outputing the records\n \n return s1,s2",
"def write_input(self, suffix=''):\n \n out_fname = \"input.plasma_1d\"+suffix\n with open(out_fname, 'w+') as outfile:\n outfile.write('# Input file for ASCOT containing radial 1D information of plasma temperature,density and toroidal rotation \\n')\n outfile.write('# range must cover [0,1] of normalised poloidal rho. It can exceed 1. \\n')\n outfile.write('# {:s} (first 3 lines are comment lines) \\n'.format(time.strftime('%d%b%y')))\n outfile.write('{:d}\\t{:1d}\\t# Nrad,Nion \\n'.format(self.nrho,self.nion))\n strcoll = str(1)+' ' # for electrons\n strZ=''\n strA=''\n for i in range(self.nion):\n strZ += str(self.Z[i]) + ' '\n strA += str(self.A[i]) + ' '\n strcoll += str(int(self.coll_mode[i])) + ' '\n strZ +='\\t\\t# ion Znum \\n'\n strA +='\\t\\t# ion Amass \\n'\n strcoll += '# collision mode (0= no colls, 1=Maxw colls, 2=binary colls, 3=both colls) 1st number is for electrons \\n'\n outfile.write(strZ)\t\t\t\t\n outfile.write(strA)\n outfile.write(strcoll)\n \n lab_len=15\n strlabel='RHO (pol)'.ljust(lab_len)+'Te (eV)'.ljust(lab_len)+'Ne (1/m3)'.ljust(lab_len)+'Vtor_I (rad/s)'.ljust(lab_len)+\\\n 'Ti1 (eV)'.ljust(lab_len)\n for i in range(self.nion):\n tmpstr ='Ni{:d} (1/m3)'.format(i+1)\n strlabel+=tmpstr.ljust(lab_len)\n strlabel+='\\n'\n outfile.write(strlabel)\n data=np.array((self.rho, self.te, self.ne, self.vt, self.ti), dtype=float)\n data = np.concatenate([data, [self.ni[i,:] for i in range(self.nion)]])\n\n data=np.transpose(data)\n #print(data)\n #print(\"if i don't print, it won't work\")\n np.savetxt(outfile, data, fmt='%.5e')",
"def _get_halo(self,i):\n if self._order is False:\n if self._subs is True:\n #this needs to be tested again on a snapshot that is not ordered!\n x = Halo(i, self, self.base, np.where(np.in1d(self.base['iord'], self.ids[self._subhalodat['sub_off'][i]:self._subhalodat['sub_off'][i]+self._subhalodat['sub_len'][i]] )))\n else:\n x = Halo(i, self, self.base, np.where(np.in1d(self.base['iord'], self.ids[self._halodat['group_off'][i]:self._halodat['group_off'][i]+self._halodat['group_len'][i]] )))\n \n else:\n if self._subs is False: #to use groups as halos:\n x = Halo(i, self, self.base, self.ids[self._halodat['group_off'][i]:self._halodat['group_off'][i]+self._halodat['group_len'][i]] ) \n else:\n x=Halo(i, self, self.base, self.ids[self._subhalodat['sub_off'][i]:self._subhalodat['sub_off'][i]+self._subhalodat['sub_len'][i]] )\n \n x._descriptor = \"halo_\"+str(i)\n x.properties.update(self.get_halo_properties(i))\n return x",
"def writeSerpent(self, output, mixsuffix):\n # Declare S(a,b) for H1_H2O, if it present. Here, Serpent requires the\n # ZAID :\n # http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual#mat_moder\n tslFiles = None\n moder = ''\n for iso in self.m_compo:\n if iso == 'H1_H2O':\n moder = 'moder lwtr' + str(self.m_mix) + mixsuffix + ' 1001\\n'\n # Write the header line for the entire material...\n output += 'mat mix' + str(self.m_mix) + mixsuffix + ' sum\\n'\n output += moder\n output += 'tmp ' + str(self.m_temp) + ' % Kelvin\\n'\n # ...and then, write one line for each isotope. But let's prepare it,\n # first !\n for iso in self.m_compo:\n if iso == 'H1_H2O':\n iso_ace = 'H1lwtr'\n iso_ace_tsl = 'lwtr'\n else:\n iso_ace = iso\n # Subset xsdata file for this isotope we're on, for all available\n # temperatures\n xsdata_subset = []\n # Go through xsdata file, find ace files of this isotope we're on\n with open(xsdata) as xsdatafile:\n xsdatalines = xsdatafile.readlines()\n for xsdataline in xsdatalines:\n # First field in xsdata file contains isotope's name\n isoxsdata = xsdataline.split()[0]\n if isoxsdata.startswith(iso_ace + '.'):\n xsdata_subset.append(xsdataline.rstrip().split())\n if len(xsdata_subset) == 0:\n raise Exception('Could not find ace file for isotope: ' + iso)\n # Sort by ascending temperatures, contained in field [6]\n xsdata_subset.sort(key=lambda x: int(x[6]))\n # Check that a temperature below the one requested is available\n if int(xsdata_subset[0][6]) > self.m_temp:\n raise Exception('The minimum temperature available in the ace '\n + 'files of the isotope ' + iso_ace + ' is '\n + xsdata_subset[0][6] + 'K' + '. This is too high for the '\n + 'requested temperature: ' + str(self.m_temp) + 'K')\n # For TSL interpolation, we also need an available temperature\n # above the one requested\n if (int(xsdata_subset[-1][6]) < self.m_temp) and (iso == 'H1_H2O'):\n raise Exception('The maximal temperature available in the TSL '\n + 'ace files of the isotope ' + iso_ace + ' is '\n + xsdata_subset[-1][6] + 'K' + '. This is too low for the '\n + 'requested temperature: ' + str(self.m_temp) + 'K')\n # Select ace file with the temperature immediately below\n aceFile = None\n i = 0\n while not aceFile:\n if int(xsdata_subset[i][6]) > self.m_temp:\n aceFile = xsdata_subset[i-1][0]\n # Corresponding TSL files should also be kept (lower *and*\n # upper bounds are required)\n if iso == 'H1_H2O':\n if tslFiles:\n raise Exception('TSL file has been already '\n + 'attributed. D2S is limited to one single TSL '\n + 'per material.')\n # * Remove isotope name used in continuous ace and\n # use the TSL specific name instead\n # * Remove last character ('c' for 'continuous ace')\n # and replace it with 't' (for 'tsl ace')\n tslFiles = (iso_ace_tsl + xsdata_subset[i-1][1][-4:-1]\n + 't' + ' '\n + iso_ace_tsl + xsdata_subset[i][1][-4:-1]\n + 't')\n i = i + 1\n # Write the line for the isotope we're on\n output += \"%s %.8E\\n\" %(aceFile,self.m_compo[iso])\n # Write the 'thermr' card, if needed\n if tslFiles:\n output += ('therm lwtr' + str(self.m_mix) + mixsuffix + ' '\n + str(self.m_temp) + ' ' + tslFiles + '\\n')\n output += '\\n'\n return output",
"def generate_hdf(sav_file, instr, lamps, outfil, dtoler=0.6):\n from pypit import pyputils\n msgs = pyputils.get_dummy_logger()\n\n from pypit import arwave\n from pypit import arutils\n arutils.dummy_settings()\n #\n from arclines.pypit_utils import find_peaks\n from arclines.io import load_line_lists\n #\n\n # Read IDL save file\n sav_file = os.getenv('LONGSLIT_DIR')+'calib/linelists/'+sav_file\n s = readsav(sav_file)\n ctbl = Table(s['calib']) # For writing later\n\n # Line list\n alist = load_line_lists(lamps)\n\n # One spectrum?\n ashape = s['archive_arc'].shape\n if len(ashape) == 1:\n nspec = 1\n npix = ashape[0]\n else:\n nspec = s['archive_arc'].shape[0]\n npix = ashape[1]\n\n # Meta data\n mdict = dict(npix=npix, instr=instr,\n lamps=[str(ilamp) for ilamp in lamps], # For writing to hdf5\n nspec=nspec, infil=sav_file, IDairvac='vac')\n print(\"Processing {:d} spectra in {:s}\".format(mdict['nspec'], sav_file))\n\n # Start output\n outh5 = h5py.File(out_path+outfil, 'w')\n outh5.create_group('arcs')\n\n # Loop on spectra\n for ss in range(mdict['nspec']):\n sss = str(ss)\n # Parse\n if nspec == 1:\n spec = s['archive_arc']\n else:\n spec = s['archive_arc'][ss]\n calib = s['calib'][ss]\n # Peaks\n tampl, tcent, twid, w, yprep = find_peaks(spec)\n pixpk = tcent[w]\n pixampl = tampl[w]\n\n # Wavelength solution\n try:\n cfunc = calib['func'].decode('UTF-8')\n except:\n cfunc = calib['func']\n if cfunc == 'CHEBY':\n wv_air = cheby_val(calib['ffit'], np.arange(mdict['npix']),\n calib['nrm'], calib['nord'])\n elif cfunc == 'POLY':\n wv_air = poly_val(calib['ffit'], np.arange(mdict['npix']),\n calib['nrm'])\n else:\n pdb.set_trace()\n raise ValueError(\"Bad calib\")\n # Check blue->red or vice-versa\n if ss == 0:\n if wv_air[0] > wv_air[-1]:\n mdict['bluered'] = False\n else:\n mdict['bluered'] = True\n\n # Peak waves\n if calib['func'] == 'CHEBY':\n twave_air = cheby_val(calib['ffit'], pixpk,\n calib['nrm'], calib['nord'])\n else:\n twave_air = poly_val(calib['ffit'], pixpk, calib['nrm'])\n # Air to Vac\n twave_vac = arwave.airtovac(twave_air*u.AA)\n wave_vac = arwave.airtovac(wv_air*u.AA)\n if ss == 0:\n disp = np.median(np.abs(wave_vac-np.roll(wave_vac,1)))\n print(\"Average dispersion = {:g}\".format(disp))\n # IDs\n idwv = np.zeros_like(pixpk)\n idsion = np.array([str('12345')]*len(pixpk))\n for kk,twv in enumerate(twave_vac.value):\n # diff\n diff = np.abs(twv-alist['wave'])\n if np.min(diff) < dtoler:\n imin = np.argmin(diff)\n idwv[kk] = alist['wave'][imin]\n #idsion[kk] = alist['Ion'][imin] NIST\n idsion[kk] = alist['ion'][imin]\n # Red to blue?\n if mdict['bluered'] is False:\n pixpk = mdict['npix']-1 - pixpk\n # Re-sort\n asrt = np.argsort(pixpk)\n pixpk = pixpk[asrt]\n idwv = idwv[asrt]\n # Reverse\n spec = spec[::-1]\n wave_vac = wave_vac[::-1]\n # Output\n outh5['arcs'].create_group(sss)\n # Datasets\n outh5['arcs'][sss]['wave'] = wave_vac\n outh5['arcs'][sss]['wave'].attrs['airvac'] = 'vac'\n outh5['arcs'][sss]['spec'] = spec\n outh5['arcs'][sss]['spec'].attrs['flux'] = 'counts'\n outh5['arcs'][sss]['pixpk'] = pixpk\n outh5['arcs'][sss]['ID'] = idwv\n outh5['arcs'][sss]['ID'].attrs['airvac'] = 'vac'\n outh5['arcs'][sss]['Ion'] = str(idsion)\n # LR wavelengths\n outh5['arcs'][sss]['LR_wave'] = wv_air\n outh5['arcs'][sss]['LR_wave'].attrs['airvac'] = 'air'\n # LR Fit\n outh5['arcs'][sss].create_group('LR_fit')\n for key in ctbl.keys():\n outh5['arcs'][sss]['LR_fit'][key] = ctbl[ss][key]\n\n # Meta data\n outh5.create_group('meta')\n for key in mdict.keys():\n try:\n 
outh5['meta'][key] = mdict[key]\n except TypeError: # Probably a unicode thing\n if isinstance(mdict[key], list):\n if isinstance(mdict[key][0], basestring):\n tmp = [bytes(item, 'utf-8') for item in mdict[key]]\n else:\n tmp = mdict[key]\n elif isinstance(mdict[key], basestring):\n tmp = str(mdict[key])\n try:\n outh5['meta'][key] = tmp\n except TypeError:\n pdb.set_trace()\n # Close\n outh5.close()\n print('Wrote {:s}'.format(out_path+outfil))",
"def compute_hamiltonian_representations(frames, orbs, hypers, lmax, nu, cg, scale=1,\n select_feats = None, half_hete = True, mp_feats = False,\n rhoi_pca = None, rho2i_pca = None,\n rhoij_rho2i_pca = None, rhoij_pca = None,\n verbose = False\n ):\n\n spex = SphericalExpansion(**hypers)\n rhoi = compute_rhoi(frames, spex, hypers)\n\n # compresses further the spherical expansion features across species\n if rhoi_pca is not None:\n rhoi = apply_rhoi_pca(rhoi, rhoi_pca)\n\n # makes sure that the spex used for the pair terms uses adaptive species\n hypers_ij = deepcopy(hypers)\n hypers_ij[\"expansion_by_species_method\"] = \"structure wise\"\n spex_ij = SphericalExpansion(**hypers_ij)\n\n tnat = 0\n els = list(orbs.keys())\n nel = len(els)\n # prepare storage\n elL = list(itertools.product(els,range(lmax+1),[-1,1]))\n hetL = [ (els[i1], els[i2], L, pi) for i1 in range(nel) for i2 in range((i1+1 if half_hete else 0), nel) for L in range(lmax+1) for pi in [-1,1] ]\n feats = dict(diag = { L: [] for L in elL },\n offd_p = { L: [] for L in elL },\n offd_m = { L: [] for L in elL },\n hete = { L: [] for L in hetL },)\n\n if rhoij_rho2i_pca is None and rho2i_pca is not None:\n rhoij_rho2i_pca = rho2i_pca\n\n #before = tracemalloc.take_snapshot()\n for f in frames:\n fnat = len(f.numbers)\n frhoi = rhoi[tnat:tnat+fnat]*scale\n fgij = compute_gij(f, spex_ij, hypers_ij)*scale\n\n if (select_feats is None or select_feats[\"type\"]!=\"diag\") and nu == 2:\n if mp_feats:\n # note we abuse rhoij_rho2i_pca to fetch the rho1ijp pca compressor\n rhonui, prhonui = compute_all_rho1ijp_lambda(frhoi, fgij, cg, rhoij_rho2i_pca)\n else:\n rhonui, prhonui = compute_all_rho2i_lambda(frhoi, cg, rhoij_rho2i_pca)\n else:\n rhonui, prhonui = frhoi, None\n\n crhoi = None\n for L in range(lmax+1):\n if select_feats is not None and L>0 and select_feats[\"block\"][-2] != L:\n continue\n\n if nu==0:\n lrhonui, lprhonui = np.ones((fnat, 1, 2*L+1)), np.ones((1))\n elif nu==1:\n lrhonui, lprhonui = compute_rho1i_lambda(frhoi, L, cg)\n else:\n lrhonui, lprhonui = compute_rho2i_lambda(frhoi, L, cg)\n if rho2i_pca is not None:\n lrhonui, lprhonui = apply_rho2i_pca(lrhonui, lprhonui, rho2i_pca)\n\n if select_feats is None or select_feats[\"type\"]!=\"diag\":\n if nu==0:\n lrhoij, prhoij = compute_rho0ij_lambda(rhonui, fgij, L, cg, prhonui)\n elif nu==1:\n if mp_feats:\n lrhoij, prhoij = compute_rho1ijp_lambda(rhonui, fgij, L, cg, prhonui)\n else:\n lrhoij, prhoij = compute_rho1ij_lambda(rhonui, fgij, L, cg, prhonui)\n else:\n if mp_feats:\n lrhoij, prhoij = compute_rho11ijp_lambda(frhoi, rhonui, L, cg, prhonui)\n else:\n lrhoij, prhoij = compute_rho2ij_lambda(rhonui, fgij, L, cg, prhonui)\n if rhoij_pca is not None:\n lrhoij, prhoij = apply_rhoij_pca(lrhoij, prhoij, rhoij_pca)\n \n crhoi, pcrhoi = contract_rhoij(lrhoij, prhoij, f.symbols, els)\n\n for i, el in enumerate(els):\n iel = np.where(f.symbols==el)[0]\n if len(iel) == 0:\n continue\n if select_feats is not None and el != select_feats[\"block\"][0]:\n continue\n\n for pi in [-1,1]:\n wherepi = np.where(lprhonui==pi)[0]\n if len(wherepi)==0:\n # add a vector of zeros\n feats['diag'][(el, L, pi)].append(np.zeros(shape=(len(iel), 1, 2*L+1)))\n continue\n feats['diag'][(el, L, pi)].append(lrhonui[...,wherepi,:][iel].reshape((len(iel), -1, 2*L+1) ) )\n\n if crhoi is not None:\n wherepi = np.where(pcrhoi==pi)[0]\n if len(wherepi)==0:\n continue\n feats['diag'][(el, L, pi)][-1] = np.concatenate([\n feats['diag'][(el, L, pi)][-1],\n crhoi[...,wherepi,:][iel].reshape( (len(iel), -1, 
2*L+1) )\n ], axis=-2)\n\n if select_feats is not None and select_feats[\"type\"]==\"diag\":\n continue\n\n triu = np.triu_indices(len(iel), 1)\n ij_up = (iel[triu[0]],iel[triu[1]]) # ij indices, i>j\n ij_lw = (ij_up[1], ij_up[0]) # ij indices, i<j\n lrhoij_p = (lrhoij[ij_up] + lrhoij[ij_lw])/np.sqrt(2)\n lrhoij_m = (lrhoij[ij_up] - lrhoij[ij_lw])/np.sqrt(2)\n for pi in [-1,1]:\n if len(ij_up[0])==0:\n continue\n wherepi = np.where(prhoij==pi)[0];\n if len(wherepi)==0:\n feats['offd_p'][(el, L, pi)].append( np.zeros((lrhoij_p.shape[0], 1, 2*L+1)) )\n feats['offd_m'][(el, L, pi)].append( np.zeros((lrhoij_p.shape[0], 1, 2*L+1)) )\n continue\n feats['offd_p'][(el, L, pi)].append(lrhoij_p[...,wherepi,:].reshape(lrhoij_p.shape[0], -1, 2*L+1))\n feats['offd_m'][(el, L, pi)].append(lrhoij_m[...,wherepi,:].reshape(lrhoij_m.shape[0], -1, 2*L+1))\n\n if select_feats is not None and select_feats[\"type\"]!=\"hete\":\n continue\n for elb in els[i+1:]:\n ielb = np.where(f.symbols==elb)[0]\n if len(ielb) == 0:\n continue\n if select_feats is not None and elb != select_feats[\"block\"][1]:\n continue\n\n # combines rho_ij and rho_ji\n lrhoij_het = lrhoij[iel][:,ielb]\n lrhoij_het_rev = np.swapaxes(lrhoij[ielb][:,iel],1,0)\n # make a copy and not a slice, so we keep better track\n for pi in [-1,1]:\n wherepi = np.where(prhoij==pi)[0];\n if len(wherepi)==0:\n feats['hete'][(el, elb, L, pi)].append(np.zeros((lrhoij_het.shape[0]*lrhoij_het.shape[1],1,2*L+1)))\n continue\n lrhoij_het_pi = lrhoij_het[...,wherepi,:]\n lrhoij_het_rev_pi = lrhoij_het_rev[...,wherepi,:]\n feats['hete'][(el, elb, L, pi)].append(\n np.concatenate([\n lrhoij_het_pi.reshape(\n (lrhoij_het.shape[0]*lrhoij_het.shape[1],-1,2*L+1) )\n ,\n lrhoij_het_rev_pi.reshape(\n (lrhoij_het_rev.shape[0]*lrhoij_het_rev.shape[1],-1,2*L+1) )\n ], axis=-2)\n )\n #del(lrhoij_het)\n #del(lrhoij_p, lrhoij_m)\n #del(lrhoij, lrho2)\n tnat+=fnat\n\n # cleans up combining frames blocks into single vectors - splitting also odd and even blocks\n for k in feats.keys():\n for b in list(feats[k].keys()):\n if len(feats[k][b]) == 0:\n continue\n block = np.vstack(feats[k][b])\n feats[k].pop(b)\n if len(block) == 0:\n continue\n\n feats[k][b] = block.reshape((block.shape[0], -1, 1+2*b[-2]))\n\n return feats",
"def setup():\n wcs = galsim.TanWCS(\n galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024)),\n #galsim.AffineTransform(0.26, 0., 0., 0.26, galsim.PositionD(1024,1024)),\n galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)\n )\n\n # Make the image (copied from test_single_image in test_simple.py)\n image = galsim.Image(2048, 2048, wcs=wcs)\n\n # Where to put the stars.\n x_list = [ 123.12, 345.98, 567.25, 1094.94, 924.15, 1532.74, 1743.11, 888.39, 1033.29, 1409.31 ]\n y_list = [ 345.43, 567.45, 1094.32, 924.29, 1532.92, 1743.83, 888.83, 1033.19, 1409.20, 123.11 ]\n\n # Draw a Gaussian PSF at each location on the image.\n sigma = 1.3\n g1 = 0.23\n g2 = -0.17\n du = 0.09 # in arcsec\n dv = -0.07\n flux = 123.45\n psf = galsim.Gaussian(sigma=sigma).shear(g1=g1, g2=g2).shift(du,dv) * flux\n for x, y in zip(x_list, y_list):\n bounds = galsim.BoundsI(int(x-31), int(x+32), int(y-31), int(y+32))\n offset = galsim.PositionD(x-int(x)-0.5, y-int(y)-0.5)\n psf.drawImage(image=image[bounds], method='no_pixel', offset=offset)\n image.addNoise(galsim.GaussianNoise(rng=galsim.BaseDeviate(1234), sigma=1e-6))\n\n # Write out the image to a file\n image_file = os.path.join('output','test_stats_image.fits')\n image.write(image_file)\n\n # Write out the catalog to a file\n dtype = [ ('x','f8'), ('y','f8') ]\n data = np.empty(len(x_list), dtype=dtype)\n data['x'] = x_list\n data['y'] = y_list\n cat_file = os.path.join('output','test_stats_cat.fits')\n fitsio.write(cat_file, data, clobber=True)"
] |
[
"0.7275809",
"0.6712091",
"0.64900565",
"0.6376433",
"0.557622",
"0.5441786",
"0.53530097",
"0.52741796",
"0.5257279",
"0.5211083",
"0.5197172",
"0.5094587",
"0.5081192",
"0.50688195",
"0.5064385",
"0.50592387",
"0.5035094",
"0.5020665",
"0.50188285",
"0.49892023",
"0.49658528",
"0.4964031",
"0.49621853",
"0.49573877",
"0.49521193",
"0.4919528",
"0.4914332",
"0.48912087",
"0.48872438",
"0.48864567"
] |
0.67932194
|
1
|
Creates a 'children' array inside each halo's 'properties' listing the halo IDs of its children. Used in case the reading of substructure data from the AHF-supplied _substructure file fails for some reason.
|
def _setup_children(self):
for i in xrange(self._nhalos):
self._halos[i + 1].properties['children'] = []
for i in xrange(self._nhalos):
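        # a halo is only registered as a child when its 'hostHalo' property points
        # at a valid host (the default of -2 covers the case where the key is absent)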
host = self._halos[i + 1].properties.get('hostHalo', -2)
if host > -1:
try:
self._halos[host + 1].properties['children'].append(i + 1)
except KeyError:
pass
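
A minimal usage sketch (the halo container name, its 1-based indexing, and the helper itself are assumptions, not part of the catalogue class) showing how the 'children' lists built above could be walked to collect every descendant of a halo:

def collect_descendants(halos, halo_id):
    # depth-first walk over the per-halo 'children' lists filled in by _setup_children
    stack = list(halos[halo_id].properties.get('children', []))
    descendants = []
    while stack:
        child = stack.pop()
        descendants.append(child)
        stack.extend(halos[child].properties.get('children', []))
    return descendants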
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i+1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i+1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host+1].properties['children'].append(i+1)\n except KeyError:\n pass",
"def generate_children(self):\n\n if self.children is not None:\n return\n\n print \"Generating children for %s (%s rows)\" % (self.bbox, self.count)\n\n self.children = [QuadtreeNode(self.tree, b)\n for b in self.bounds.get_children()]\n\n with utils.msgpack_open(self.source_filename) as f:\n with utils.msgpack_open(self.children[0].source_filename, \"w\") as self.children[0].file:\n with utils.msgpack_open(self.children[1].source_filename, \"w\") as self.children[1].file:\n with utils.msgpack_open(self.children[2].source_filename, \"w\") as self.children[2].file:\n with utils.msgpack_open(self.children[3].source_filename, \"w\") as self.children[3].file:\n for row in f:\n for child in self.children:\n if self.tree.latitude_col in row and self.tree.longitude_col in row and child.bbox.contains(row[self.tree.longitude_col], row[self.tree.latitude_col]):\n child.file.write(row)\n child.count += 1\n break\n for child in self.children:\n del child.file\n\n return self.children",
"def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))",
"def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret",
"def children(self) -> Iterable[Heirarchical]:\n return []",
"def Children(self) -> _n_1_t_2:",
"def _get_child_meshes(obj):\n\tif obj.type == 'MESH':\n\t\treturn [obj], []\n\telse:\n\t\tmeshes, other = [], [obj]\n\t\tfor child in obj.children:\n\t\t\tchild_meshes, child_other = _get_child_meshes(child)\n\t\t\tmeshes += child_meshes\n\t\t\tother += child_other\n\n\t\treturn meshes, other",
"def createChildren(self):\n children = []\n posMoves = self.checkPossibleMoves()\n\n for i in range(self.nrOfCars):\n if len(posMoves[i]) > 0:\n for j in range(len(posMoves[i])):\n child = copy(self.changeable)\n child[i] = child[i] + posMoves[i][j]\n children.append(child)\n\n return children",
"def make_children(self):\r\n\t\tchildren = []\r\n\r\n\t\tposMoves = self.generate_possible_moves()\r\n\r\n\t\tfor direction in posMoves:\r\n\t\t\tnewChild = self.copy()\r\n\r\n\t\t\tnewChild.make_move(direction)\r\n\t\t\tnewChild.steps += 1\r\n\t\t\tnewChild.generate_heuristic()\r\n\t\t\tnewChild.eqHash = hash(str(newChild))\r\n\r\n\t\t\tchildren.append(newChild)\r\n\r\n\t\treturn children",
"def create_children(self):\n actionCount = len(self.availableActions)\n self.children = [None] * actionCount\n\n # Split creation into multiple threads if this is the master node.\n if self.level == 0 and USE_THREADS:\n threads = [None] * actionCount\n for idx in range(actionCount):\n threads[idx] = threading.Thread(target=create_child, args=(self, idx))\n threads[idx].start()\n for t in threads:\n t.join()\n else:\n for idx in range(actionCount):\n create_child(self, idx)\n # Stop making child branches if the most recent child branch already found lethal.\n if self.children[idx].get_max_win_strength() == WIN_VALUE:\n self.children = self.children[:idx+1]\n break",
"def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)",
"def addChildren( self, tree ):\n \n children = []\n for x in range(0,len(tree)):\n children.append([])\n \n for node, parent, level, ranges in tree:\n if node != 0:\n children[parent].append(node)\n \n new_tree = []\n for x in range(0,len(tree)):\n new_tree.append( [list(tree[x]), list(children[x])] )\n \n return new_tree",
"def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances",
"def setup_children(self):\n # Only generate new children if there are none\n if len(self.children) == 0:\n # Create the encoder and decoder genes\n encoder = EncoderGene(name='encoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n self.children = [encoder]\n\n decoder = DecoderGene(name='decoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n\n self.children.append(decoder)\n\n pass",
"def init_children(self):\n children = []\n legal_moves = list(chess.Board(self.state).legal_moves)\n for move in legal_moves:\n temp_board = chess.Board(self.state)\n temp_board.push_san(str(move))\n children.append(Node(temp_board.fen(), self))\n self.children = children",
"def children(self): # noqa: ANN201",
"def __initChild(self):\n if self.__child is None:\n self.__child = []\n self._populateChild()",
"def init_hierarchy(self):\n self.create_row(0)\n self.dataset_name = os.path.basename(self.root_path).strip('.zarr')\n\n self.plate_meta['plate'] = {'acquisitions': [{'id': 1,\n 'maximumfieldcount': 1,\n 'name': 'Dataset',\n 'starttime': 0}],\n 'columns': [],\n 'field_count': 1,\n 'name': self.dataset_name,\n 'rows': [],\n 'version': '0.1',\n 'wells': []}\n\n self.plate_meta['plate']['rows'].append({'name': self.rows[0]})\n\n self.well_meta['well'] = {'images': [], 'version': '0.1'}\n self.well_meta = dict(self.well_meta)",
"def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children",
"def get_array_of_children(self):\n children = [self.posXposYposZ,self.posXposYnegZ,self.posXnegYposZ,self.posXposYnegZ,self.negXposYposZ,self.negXposYnegZ,self.negXnegYposZ,self.negXnegYnegZ ] \n return children",
"def create_hierarchy(self):\n\t\tpass",
"def construct(self):\n self._content.sort(key=lambda x: (x.parent, x.index))\n i=0\n j=1\n while i<len(self._content):\n while j<len(self._content):\n if self._content[j].parent == self._content[i].index:\n self._content[i].children.append(self._content[j])\n j+=1\n else:\n break\n i+=1",
"def get_children(self):\n return []",
"def make_tree(\n self,\n recursive: bool = True\n ) -> list:\n children = []\n for file in self.path.iterdir():\n path = file\n\n if path.is_dir() and recursive:\n # try create Study\n try:\n children.append(Study(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotStudyFolder:\n pass\n # try create Experiment\n try:\n children.append(Experiment(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotExperimentFolder:\n pass\n #try create Processing\n try:\n children.append(Processing(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotProcessingFolder:\n pass\n children.append(Folder(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n try:\n if path.name in self._dataset_index:\n children.append(Dataset(path, **self._dataset_state))\n continue\n except (UnsuportedDatasetType, IncompleteDataset, NotADatasetDir):\n pass\n try:\n children.append(JCAMPDX(path, load=False))\n continue\n except (InvalidJcampdxFile, JcampdxVersionError):\n pass\n return children",
"def _getChildrenBom(self, component, level=0, currlevel=0):\n result = []\n bufferdata = []\n if level == 0 and currlevel > 1:\n return bufferdata\n for bomid in component.product_tmpl_id.bom_ids:\n for bomline in bomid.bom_line_ids:\n children=self._getChildrenBom(bomline.product_id, level, currlevel+1)\n bufferdata.extend(children)\n bufferdata.append(bomline.product_id.id)\n result.extend(bufferdata)\n return getCleanList(result)",
"def child_properties(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.property_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result",
"def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items",
"def get_children(self):\n return self.children",
"def get_children(self):\n if not self.FileInfo:\n raise StopIteration(\"No children\")\n offset = self.offset_pad(self.FileInfo.obj_offset + self.ValueLength)\n return self._recurse_children(offset)",
"def read_hierarchy(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n \r\n while lin != 'end':\r\n parts = lin.split()\r\n if lin != 'begin':\r\n ind = self.get_index_by_name(parts[0])\r\n for i in range(1, len(parts)):\r\n self.vertices[ind].children.append(self.get_index_by_name(parts[i]))\r\n lin = self.read_line(fid)\r\n lin = self.read_line(fid)\r\n return lin"
] |
[
"0.74248827",
"0.60904205",
"0.59497046",
"0.58910304",
"0.5828248",
"0.58218026",
"0.56816673",
"0.5663531",
"0.5654989",
"0.565448",
"0.5599524",
"0.559273",
"0.5586169",
"0.55819756",
"0.5578019",
"0.5564336",
"0.55633384",
"0.5557217",
"0.5547155",
"0.5513899",
"0.55126333",
"0.5504425",
"0.5499283",
"0.5457131",
"0.54545444",
"0.5451757",
"0.5447966",
"0.5423118",
"0.5407583",
"0.54012704"
] |
0.7405761
|
1
|
Get the starting positions of each halo's particle information within the AHF_particles file for faster access later
|
def _get_file_positions(self,filename):
if os.path.exists(self._ahfBasename + 'fpos'):
f = util.open_(self._ahfBasename + 'fpos')
for i in range(self._nhalos):
self._halos[i+1].properties['fstart'] = int(f.readline())
f.close()
else:
f = util.open_(filename)
for h in xrange(self._nhalos):
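            # a line with a single token here is the file-level halo count rather than
            # this halo's own header, so advance one extra line before recording the offset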
if len((f.readline().split())) == 1:
f.readline()
self._halos[h+1].properties['fstart'] = f.tell()
for i in xrange(self._halos[h+1].properties['npart']):
f.readline()
f.close()
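
A hedged sketch (the helper name is hypothetical; it relies only on the 'fstart' and 'npart' properties recorded above) of how the cached byte offsets let a caller seek straight to one halo's block in the AHF_particles file instead of re-scanning it from the top:

def _read_particles_at(self, filename, halo_id):
    import numpy as np
    # jump to the byte offset recorded for this halo and read its 'npart'
    # particle lines; each line begins with the particle index
    with open(filename) as f:
        f.seek(self._halos[halo_id].properties['fstart'])
        npart = self._halos[halo_id].properties['npart']
        return np.array([int(f.readline().split()[0]) for _ in range(npart)])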
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _load_ahf_particle_block(self, f, nparts=None):\n ng = len(self.base.gas)\n nd = len(self.base.dark)\n ns = len(self.base.star)\n nds = nd+ns\n\n if nparts is None:\n startline = f.readline()\n if len((startline.split()))==1:\n startline = f.readline()\n nparts = int(startline.split()[0])\n\n if self.isnew:\n if not isinstance(f, gzip.GzipFile):\n data = (np.fromfile(\n f, dtype=int, sep=\" \", count=nparts * 2).reshape(nparts, 2))[:, 0]\n data = np.ascontiguousarray(data)\n else:\n # unfortunately with gzipped files there does not\n # seem to be an efficient way to load nparts lines\n data = np.zeros(nparts, dtype=int)\n for i in xrange(nparts):\n data[i] = int(f.readline().split()[0])\n\n if self._use_iord:\n data = self._iord_to_fpos[data]\n else:\n if type(self.base) is not snapshot.nchilada.NchiladaSnap:\n hi_mask = data >= nds\n data[np.where(hi_mask)] -= nds\n data[np.where(~hi_mask)] += ng\n else:\n st_mask = (data >= nd) & (data < nds)\n g_mask = data >= nds\n data[np.where(st_mask)] += ng\n data[np.where(g_mask)] -= ns\n else:\n if not isinstance(f, gzip.GzipFile):\n data = np.fromfile(f, dtype=int, sep=\" \", count=nparts)\n else:\n # see comment above on gzipped files\n data = np.zeros(nparts, dtype=int)\n for i in xrange(nparts):\n data[i] = int(f.readline())\n data.sort()\n return data",
"def read_texture_file(filename):\n \n # Deal with compressed files.\n import os\n if (os.path.splitext(filename)[1] == '.gz'):\n import gzip\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'r')\n\n # Stuff everything into a dict and a list\n # for now. Sort this out later (we will probably \n # want to have objects at some point\n header_data = {}\n particles = []\n\n header_lines = 5\n particle_header_lines = 9\n \n for line in f:\n if header_lines == 5:\n header_data['theia_lun'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 4:\n header_data['npartsallo'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 3:\n header_data['npartsused'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 2:\n header_data['n_expected_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 1:\n header_data['nseen_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 0:\n if particle_header_lines == 9:\n this_particle = {}\n this_particle['process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 8:\n this_particle['particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 7:\n this_particle['old_particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 6:\n this_particle['old_process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 5:\n this_particle['particle_class'] = line.strip()\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 4:\n this_particle['particle_position'] = np.array(\n [line[0:12], line[12:24], line[24:36]])\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 3:\n this_particle['idata_count'] = int(line)\n if this_particle['idata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particle_header_lines = particle_header_lines - 2\n elif particle_header_lines == 2:\n this_particle['particle_idata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+12] for i in xrange(0, len(line.rstrip('\\r\\n')), 12)]\n )\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 1:\n this_particle['rdata_count'] = int(line)\n if this_particle['rdata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particles.append(this_particle)\n particle_header_lines = 9\n elif particle_header_lines == 0:\n this_particle['particle_rdata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+14] for i in xrange(0, len(line.rstrip('\\r\\n')), 14)]\n )\n particles.append(this_particle)\n particle_header_lines = 9\n f.close()\n\n return header_data, particles",
"def parse_chunks(self): \n result_particles = []\n (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()\n\n while chunk is not None:\n #\n # Discard the Flag record since it has already been processed.\n # We also need to check for this being the first record, \n # since an end of velocity record could result in a pattern match \n # with a Flag record if the size of the velocity records are \n # greater than or equal to the Flag record size.\n #\n if self._read_state[StateKey.FIRST_RECORD] and \\\n FLAG_RECORD_MATCHER.match(chunk):\n self._increment_state(FLAG_RECORD_SIZE)\n\n #\n # If we haven't reached the end of the Velocity record,\n # see if this next record is the last one (all zeroes).\n #\n elif not self._read_state[StateKey.VELOCITY_END]:\n velocity_end = self.velocity_end_record_matcher.match(chunk)\n self._increment_state(self.velocity_record_size)\n\n #\n # A velocity data record of all zeroes does not generate\n # a data particle.\n #\n if velocity_end:\n self._read_state[StateKey.VELOCITY_END] = True\n else:\n #\n # If the file is missing an end of velocity record,\n # meaning we'll exhaust the file and run off the end,\n # this test will catch it.\n #\n velocity_fields = self.parse_velocity_record(chunk)\n if velocity_fields:\n #\n # Generate a data particle for this record and add\n # it to the end of the particles collected so far.\n #\n timestamp = self.calculate_timestamp()\n ntp_time = ntplib.system_to_ntp_time(timestamp)\n\n particle = self._extract_sample(\n Vel3dKWfpStcVelocityDataParticle,\n None, velocity_fields, ntp_time)\n\n result_particles.append((particle,\n copy.copy(self._read_state)))\n\n #\n # Ran off the end of the file. Tell 'em the bad news.\n #\n else:\n log.warn(\"EOF reading velocity records\")\n raise SampleException(\"EOF reading velocity records\")\n\n #\n # If we have read the end of velocity data records,\n # the next record is the Time data record by definition.\n # Generate the data particle and\n # add it to the end of the particles collected so far.\n #\n else:\n #\n # Make sure there was enough data to comprise a Time record.\n # We can't verify the validity of the data,\n # only that we had enough data.\n #\n time_fields = self.parse_time_record(chunk)\n if time_fields:\n #\n # Convert the tuple to a list, add the number of\n # Velocity record received (not counting the end of\n # Velocity record, and convert back to a tuple.\n #\n time_list = list(time_fields)\n time_list.append(self.calculate_record_number() - 1)\n time_fields = tuple(time_list)\n ntp_time = ntplib.system_to_ntp_time(self.time_on)\n\n particle = self._extract_sample(\n Vel3dKWfpStcTimeDataParticle, \n None, time_fields, ntp_time)\n\n self._increment_state(TIME_RECORD_SIZE)\n result_particles.append((particle,\n copy.copy(self._read_state)))\n\n else:\n log.warn(\"EOF reading time record\")\n raise SampleException(\"EOF reading time record\")\n\n self._read_state[StateKey.FIRST_RECORD] = False\n\n (timestamp, chunk, start, \n end) = self._chunker.get_next_data_with_index()\n\n return result_particles",
"def parse_chunks(self):\n\n file_name = self._stream_handle.name\n sequence_number = None\n file_time = None\n\n # Extract the sequence number & file time from the file name\n match = FILE_NAME_MATCHER.search(file_name)\n\n if match:\n # store the sequence number & file time to put into the particle\n sequence_number = match.group(AdcptMWVSParticleKey.SEQUENCE_NUMBER)\n file_time = match.group(AdcptMWVSParticleKey.FILE_TIME)\n else:\n message = 'Unable to extract file time or sequence number from WVS input file: %s '\\\n % file_name\n log.warn(message)\n self._exception_callback(RecoverableSampleException(message))\n\n result_particles = []\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n\n while chunk:\n\n particle = self._extract_sample(self._particle_class, sequence_number, file_time,\n None, chunk, None)\n\n if particle is not None:\n result_particles.append((particle, None))\n\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n\n return result_particles",
"def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n print \"Loading particles from h5 file: \", particles_file\r\n\r\n if myrank == 0:\r\n #h5 = tables.open_file(particles_file)\r\n h5 = h5py.File(particles_file)\r\n \r\n # use explicit int conversion otherwise there seems to\r\n # be a typepython->C++ type mismatch of numpy.int64->int\r\n #num_total_particles = int(h5.root.particles.shape[0])\r\n num_total_particles = int(h5['particles'].shape[0])\r\n \r\n if verbose:\r\n print \"Total of \", num_total_particles, \" particles from file\"\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n particles = h5['particles']\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particles.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm) \r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. #fix this quantity\r\n \r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particles[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n\r\n return bunch",
"def read_txt_particles(particles_file, refpart, real_particles, bucket_length, comm, madx_format, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n if madx_format:\r\n print \"Loading madX particles from txt file: \", particles_file\r\n else:\r\n print \"Loading Synergia particles from txt file: \", particles_file\r\n\r\n if myrank == 0:\r\n particles = np.loadtxt(particles_file)\r\n num_total_particles = particles.shape[0]\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particles.shape[1] != 6) and (particles.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n \r\n if madx_format:\r\n # numpy manipulations to convert kinematics\r\n # convert MAD-X T=-c*dt to Synergia c*ct\r\n particles[:,4] = -particles[:,4]\r\n # convert MAD-X Delta-E/pc to Synergia delta-p/p\r\n # sqrt(((dE/p0c)+(E0/p0c))**2 - (m/p0c)**2) - (p0c/p0c)\r\n m_over_pc = pmass/p0c\r\n E_0_over_pc = E_0/p0c\r\n particles[:,5] = np.sqrt( (particles[:,5] + E_0_over_pc) *\r\n (particles[:,5] + E_0_over_pc) - m_over_pc**2 ) - 1.0\r\n \r\n\r\n # if there are no IDs, append particle ID column\r\n if particles.shape[1] != 7:\r\n particles_w_id = np.column_stack((particles,\r\n np.arange(num_total_particles, dtype='d')))\r\n else:\r\n particles_w_id = particles\r\n \r\n if myrank == 0:\r\n print \"Read \", num_total_particles, \" particles\"\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm)\r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. 
#fix this quantity\r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particles_w_id[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles_w_id[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n return bunch",
"def get_positions(self):\n\n return np.array([p.position for p in self.particles])",
"def particles(self):\n if self.data_section is None:\n return None\n data_keys = self.data_section.keys()\n if bool(data_keys)==False:\n return None\n particles_section = self.data_section.values()[0].get('particles', None)\n if particles_section is None:\n return None\n return dict(\n (id, dict(species_id=spid, position=pos))\n for id, spid, pos in particles_section.value)",
"def read_groups_particles(filename):\n \n f = open(filename,'r')\n\n Ntot = fromstring(f.read(4),int32)[0]\n Pos\t = fromstring(f.read(3*4*Ntot),float32)\n Pos.shape = (Ntot,3)\n f.close()\n \n return Pos",
"def parse_chunks(self):\n\n result_particles = []\n (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()\n non_data = None\n\n # sieve looks for timestamp, update and increment position\n while (chunk != None):\n time_match = TIME_MATCHER.match(chunk)\n data_match = DATA_MATCHER.match(chunk)\n if time_match:\n log.trace(\"Encountered timestamp in data stream: %s\", time_match.group(1))\n self._timestamp = self._convert_string_to_timestamp(time_match.group(1))\n self._increment_state(end, self._timestamp)\n\n elif data_match:\n if self._timestamp <= 1.0:\n raise SampleException(\"No reasonable timestamp encountered at beginning of file!\")\n\n # particle-ize the data block received, return the record\n sample = self._extract_sample(self._particle_class, DATA_MATCHER, chunk, self._timestamp)\n if sample:\n # create particle\n log.trace(\"Extracting sample chunk %s with read_state: %s\", chunk, self._read_state)\n self._increment_state(end, self._timestamp) \n self._increment_timestamp() # increment one samples worth of time\n result_particles.append((sample, copy.copy(self._read_state)))\n\n # Check for noise between records, but ignore newline. This is detecting noise following\n # the last successful chunk read which is why it is post sample generation.\n if non_data is not None and non_data != \"\\n\":\n log.info(\"Gap in datafile detected.\")\n log.trace(\"Noise detected: %s\", non_data)\n self.start_new_sequence()\n\n if non_data is not None:\n self._increment_state(len(non_data), self._timestamp)\n\n (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()\n (nd_timestamp, non_data) = self._chunker.get_next_non_data(clean=True)\n\n return result_particles",
"def read_positions():\n return np.genfromtxt(\"POSITIONS.OUT\").transpose()",
"def parse_file(self):\n # the header was already read in the init, start at the first sample line\n\n for line in self._stream_handle:\n\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}\n data_dict = self._read_data(line)\n\n if GliderParser._has_science_data(data_dict, self._particle_class):\n # create the timestamp\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n # create the particle\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))",
"def parse_file(self):\n\n # Create the gps position interpolator\n gps_interpolator = GpsInterpolator()\n\n # the header was already read in the init, start at the samples\n for data_record in self._stream_handle:\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n data_dict = self._read_data(data_record)\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n\n # handle this particle if it is an engineering metadata particle\n # this is the glider_eng_metadata* particle\n if not self._metadata_sent:\n self._record_buffer.append(self.handle_metadata_particle(timestamp))\n\n # check for the presence of engineering data in the raw data row before continuing\n # This is the glider_eng* particle\n if GliderParser._has_science_data(data_dict, self._particle_class):\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))\n\n # check for the presence of GPS data in the raw data row before continuing\n # This is the glider_gps_position particle\n if GliderParser._has_science_data(data_dict, self._gps_class):\n gps_interpolator.append_to_buffer(\n self._extract_sample(self._gps_class, None, data_dict, internal_timestamp=timestamp))\n else:\n log.info(\"GPS data no-find: \")\n\n # check for the presence of science particle data in the raw data row before continuing\n # This is the glider_eng_sci* particle\n if GliderParser._has_science_data(data_dict, self._science_class):\n self._record_buffer.append(self._extract_sample(\n self._science_class, None, data_dict, internal_timestamp=timestamp))\n\n # If there are GPS entries, interpolate them if they contain gps lat/lon values\n if gps_interpolator.get_size() > 0:\n self._record_buffer.extend(gps_interpolator.process_and_get_objects())",
"def get_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...",
"def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS",
"def assignPositions(self):\n n = int(math.ceil(self.numAtoms**(1.0/3.0))) # Number of atoms in a direction\n particle = 0 # Particles placed so far\n \n for x in range(0, n):\n for y in range(0, n):\n for z in range(0, n):\n if (particle < self.numAtoms):\n self.atoms[particle].x = x * self.sigma\n self.atoms[particle].y = y * self.sigma \n self.atoms[particle].z = z * self.sigma\n particle += 1",
"def getPosition(self):\n\t\txxx1 = self.stokes()\n\t\txxx2 = self.thp()\n\t\txxx3 = self.tthp()\n\t\treturn [xxx1, xxx2, xxx3]",
"def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), ('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... + sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds",
"def read_pts(filename):\n lines = open(filename).read().splitlines()\n lines = lines[3:71]\n\n landmarks = []\n ibug_index = 1 # count from 1 to 68 for all ibug landmarks\n for l in lines:\n coords = l.split()\n landmarks.append(eos.core.Landmark(str(ibug_index), [float(coords[0]), float(coords[1])]))\n ibug_index = ibug_index + 1\n\n return landmarks",
"def getSFpartData(opt, z, halodata, pids, coords, vels, nhalo, nsubhalo):\n\n\tpids_fof = np.empty(nhalo, dtype = 'object')\n\tcoords_fof = np.empty(nhalo, dtype = 'object')\n\tvels_fof = np.empty(nhalo, dtype = 'object')\n\n\t# Lower and upper indices (first and last particle) for each halo\n\tlinds = halodata['Group/GroupOffsetType'][:,1]\n\tuinds = linds + halodata['Group/GroupLen']\n\n\t# PIDs, coords, vels for all particles in each FOF group (including substructure)\n\tfor ihalo in range(nhalo):\n\t\tpids_fof[ihalo] = np.array([part for part in pids[linds[ihalo]:uinds[ihalo]]])\n\t\tcoords_fof[ihalo] = np.array([coord for coord in coords[linds[ihalo]:uinds[ihalo]]])\n\t\tvels_fof[ihalo] = np.array([vel for vel in vels[linds[ihalo]:uinds[ihalo]]])\n\n\t# Fix wraparound for haloes located near box edges\n\tcommon.fixWraparound(opt, coords_fof, z)\n\n\treturn pids_fof, coords_fof, vels_fof",
"def get_atom_pos(self, data):\n\n\n if 'neighborhood_size' in self.args:\n neighborhood_size = self.args['neighborhood_size']\n else:\n neighborhood_size = 30\n if 'threshold' in self.args:\n threshold = self.args['threshold']\n else:\n threshold = 30\n\n #Use filters to calculate peaks\n data_max = filters.maximum_filter(data, neighborhood_size)\n maxima = (data == data_max)\n data_min = filters.minimum_filter(data, neighborhood_size)\n diff = ((data_max - data_min) > threshold)\n maxima[diff == 0] = 0\n\n labeled, num_objects = ndimage.label(maxima)\n slices = ndimage.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2\n y.append(y_center)\n\n\n posiitons=[x,y]\n\n return positions",
"def load_particle_ic(self, file_name):\n \n\n data = np.genfromtxt(file_name, names = True)\n\n self.N_part = np.size(data['x'])\n\n self.pos = np.array([data['x'], data['y'], data['z']])\n self.pos = self.pos.T.reshape(self.N_part,3)\n self.vel = np.array([data['vx'], data['vy'], data['vz']])\n self.vel = self.vel.T.reshape(self.N_part,3)\n \n self.M_part = data['m'][0] # assuming all particles have same mass\n\n _my_print('loaded %6i particles from '%(self.N_part) + file_name)\n return",
"def picket_positions(self) -> Sequence[float]:\n picket_pos = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket = self._fit(line.center.y)\n else:\n picket = self._fit(line.center.x)\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n picket_pos.append(picket / self._image.dpmm)\n return picket_pos",
"def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions",
"def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions",
"def get_data(eh, file_list):\n x_pos = []\n y_pos = []\n x_vel = []\n y_vel = []\n z_vel = []\n unique_x = []\n unique_y = []\n\n # reading data\n for file in file_list:\n with open(file, 'r') as f:\n f.readline() # Ignores first line\n for line in f:\n line = line.strip()\n column = line.split()\n if len(column) == 4:\n if file == file_list[0]:\n # Only takes position data from first file as the same in each file\n x_pos.append(float(column[0]))\n y_pos.append(float(column[1]))\n\n x_vel.append(float(column[2]))\n y_vel.append(float(column[3]))\n z_vel.append(0.0)\n\n if float(column[0]) not in unique_x:\n unique_x.append(float(column[0]))\n if float(column[1]) not in unique_y:\n unique_y.append(float(column[1]))\n else:\n x_vel.append(float(column[2]))\n y_vel.append(float(column[3]))\n z_vel.append(0.0)\n else:\n print \"Error: TXT file is not correct!\"\n\n ux = len(unique_x)\n uy = len(unique_y)\n\n\n # xmid and ymid are used to get xz- and yz-planes. The median value is used. If the number of\n # unique xs and ys is even, then the median value will be one that does not correspond to a\n # measurement. When this is the case, the first value is ignored so that the number of uniques is\n # odd, resulting in a median value that corresponds to a measurement.\n if ux % 2 == 0:\n xmid = np.median(unique_x[1:])\n else:\n xmid = np.median(unique_x)\n\n if uy % 2 == 0:\n ymid = np.median(unique_y[1:])\n else:\n ymid = np.median(unique_y)\n\n if eh == exp_h_list[-1]:\n print \"All data read.\"\n\n\n # checks list lengths to ensure matching and then averages the velocities for all files\n # and then returns an array with position and average velocities\n if len(x_pos) == len(y_pos):\n pos_count = len(x_pos)\n if len(x_vel) == len(y_vel) and len(x_vel) == len(z_vel):\n vel_count = len(x_vel)\n nof = vel_count / pos_count # equals number of files for each height\n ax_vel, ay_vel, az_vel = avg_data_each_h(nof, pos_count, x_vel, y_vel, z_vel)\n\n if make_sg:\n subgrid_array = sub_grid(ux, x_pos, y_pos, eh, ax_vel, ay_vel, az_vel)\n return subgrid_array\n else:\n z_pos = [eh] * len(x_pos)\n return xmid, ymid, zip(x_pos, y_pos, z_pos, ax_vel, ay_vel, az_vel)\n else:\n print \"Error: different number of velocities!\"\n else:\n print \"Error: not all x-positions have a corresponding y-position!\"",
"def read_genesis(self, file_name):\n \n # genesis coordinates are as follows:\n # x -- horizontal coordinate, m\n # px -- horizontal momentum, beta_x gamma\n # y -- vertical coordinate, m\n # py -- vertical momentum, beta_y gamma\n # th -- theta, the ponderomotive phase at fixed z\n # t -- time of arrival at fixed z, sec\n # gamma -- particle gamma\n\n return 0",
"def read_array_particles(particle_array, refpart, real_particles, bucket_length, comm, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n print \"Loading particles from: \".format(particle_array)\r\n\r\n if myrank == 0:\r\n \r\n # use explicit int conversion otherwise there seems to\r\n # be a typepython->C++ type mismatch of numpy.int64->int\r\n #num_total_particles = int(h5.root.particles.shape[0])\r\n num_total_particles = particle_array.shape[0]\r\n \r\n if verbose:\r\n print \"Total of \", num_total_particles, \" particles\"\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n particles = particle_array\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particle_array.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm) \r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. #fix this quantity\r\n \r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particle_array[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n\r\n return bunch",
"def import_from_pos(fh):\n elem = None\n while True:\n l = fh.readline()\n if not l: break\n if 'nwfc1' in l and 'nwfc2' in l:\n w = l.split()\n nwfc1, nwfc2 = int(w[0]), int(w[1])\n # nwfc2 is assumed to be one - only one l value\n if 'lwfc1' in l:\n w = l.split('!')[0].split()\n lwfc1 = [int(_) for _ in w]\n if 'lwfc2' in l:\n lwfc2 = int(l.split()[0])\n if 'nonzero elements' in l:\n n = int(l.split()[0])\n elem = []\n l = fh.readline()\n c = 0\n while l and c < n:\n w = l.split()\n if len(w) in {5, 10}: # 5-col is for old pos format and 10-col is the enriched format by yfliang\n # (l,m) in lwfc1, m in lwfc2 (only one), i = (x=1,y=2,z=3)\n # m ranges from -l to l\n # elem = < h_c | r_i | beta_lm > (Core-level wavefunctions always proceed. )\n elem.append([int(_) for _ in w[ : 3]] + [float(w[3]) + 1j * float(w[4])]) \n l = fh.readline()\n c += 1\n return lwfc1, lwfc2, elem",
"def find_halos(pos, ngrid, log, level=3000):\n print('Binning particles', file=log)\n cells = get_cells(pos, ngrid, log)\n count = bincount(cells, minlength=ngrid**3)\n count.shape = (ngrid,ngrid,ngrid)\n print('Count in', count.min(), count.max(), file=log)\n idx = flatnonzero(count>level)\n print('Number of cells above', level, 'is', len(idx), file=log)\n \n \n labels, num_features = ndimage.label(count>level)\n print('Number fo features', num_features, file=log)\n print('Labels in', labels.min(), labels.max(), file=log)\n locations = ndimage.find_objects(labels)\n\n dense_regions = []\n\n for i in range(num_features):\n loc = locations[i]\n hw = max(l.stop - l.start for l in loc) * 0.5 /ngrid\n hw_padded = hw + 0.0/ngrid\n\n ctr =[(0.5/ngrid)*(l.stop + l.start) for l in loc]\n count_i = count[loc][labels[loc]==(i+1)].sum()\n print('Count', count_i, file=log)\n dense_regions.append((count_i, ctr, hw_padded))\n\n # sort by number of particles in the region\n dense_regions = sorted(dense_regions, key = lambda num_ctr_hw :num_ctr_hw[0], reverse=True)\n\n return dense_regions"
] |
[
"0.619073",
"0.59944826",
"0.5959006",
"0.5894076",
"0.58502007",
"0.58053553",
"0.57826936",
"0.57551414",
"0.57096756",
"0.5669289",
"0.55754834",
"0.55319995",
"0.5517277",
"0.551446",
"0.5512527",
"0.548089",
"0.5454202",
"0.54331416",
"0.5425648",
"0.54220545",
"0.53774685",
"0.53642344",
"0.536323",
"0.5361944",
"0.5361944",
"0.5323539",
"0.53163004",
"0.5314928",
"0.5300176",
"0.52961946"
] |
0.6874911
|
0
|
Load the particles for the next halo described in the particle file f
|
def _load_ahf_particle_block(self, f, nparts=None):
ng = len(self.base.gas)
nd = len(self.base.dark)
ns = len(self.base.star)
nds = nd+ns
if nparts is None:
startline = f.readline()
if len((startline.split()))==1:
startline = f.readline()
nparts = int(startline.split()[0])
if self.isnew:
if not isinstance(f, gzip.GzipFile):
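                # new-format blocks list two whitespace-separated columns per particle;
                # read both, then keep only the first column (the particle id)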
data = (np.fromfile(
f, dtype=int, sep=" ", count=nparts * 2).reshape(nparts, 2))[:, 0]
data = np.ascontiguousarray(data)
else:
# unfortunately with gzipped files there does not
# seem to be an efficient way to load nparts lines
data = np.zeros(nparts, dtype=int)
for i in xrange(nparts):
data[i] = int(f.readline().split()[0])
if self._use_iord:
data = self._iord_to_fpos[data]
else:
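                # Without iord, the raw AHF indices follow AHF's own on-disk family
                # ordering, which differs from the snapshot's in-memory ordering;
                # shift them into place (the offsets depend on the snapshot format).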
if type(self.base) is not snapshot.nchilada.NchiladaSnap:
hi_mask = data >= nds
data[np.where(hi_mask)] -= nds
data[np.where(~hi_mask)] += ng
else:
st_mask = (data >= nd) & (data < nds)
g_mask = data >= nds
data[np.where(st_mask)] += ng
data[np.where(g_mask)] -= ns
else:
if not isinstance(f, gzip.GzipFile):
data = np.fromfile(f, dtype=int, sep=" ", count=nparts)
else:
# see comment above on gzipped files
data = np.zeros(nparts, dtype=int)
for i in xrange(nparts):
data[i] = int(f.readline())
data.sort()
return data
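A minimal usage sketch for the block reader above. The helper below is an assumption for illustration only and is not part of the catalogue class: it opens an .AHF_particles file and yields one index array per halo, relying on the method to consume the leading halo-count line on the first call when it is present.

import gzip

def iter_ahf_particle_blocks(cat, particles_path, nhalos):
    # `cat` is assumed to be an AHF catalogue instance exposing
    # _load_ahf_particle_block; nhalos is the number of halos in the file.
    opener = gzip.open if particles_path.endswith('.gz') else open
    f = opener(particles_path)
    try:
        for _ in range(nhalos):
            yield cat._load_ahf_particle_block(f)
    finally:
        f.close()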
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_particle_ic(self, file_name):\n \n\n data = np.genfromtxt(file_name, names = True)\n\n self.N_part = np.size(data['x'])\n\n self.pos = np.array([data['x'], data['y'], data['z']])\n self.pos = self.pos.T.reshape(self.N_part,3)\n self.vel = np.array([data['vx'], data['vy'], data['vz']])\n self.vel = self.vel.T.reshape(self.N_part,3)\n \n self.M_part = data['m'][0] # assuming all particles have same mass\n\n _my_print('loaded %6i particles from '%(self.N_part) + file_name)\n return",
"def read_texture_file(filename):\n \n # Deal with compressed files.\n import os\n if (os.path.splitext(filename)[1] == '.gz'):\n import gzip\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'r')\n\n # Stuff everything into a dict and a list\n # for now. Sort this out later (we will probably \n # want to have objects at some point\n header_data = {}\n particles = []\n\n header_lines = 5\n particle_header_lines = 9\n \n for line in f:\n if header_lines == 5:\n header_data['theia_lun'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 4:\n header_data['npartsallo'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 3:\n header_data['npartsused'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 2:\n header_data['n_expected_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 1:\n header_data['nseen_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 0:\n if particle_header_lines == 9:\n this_particle = {}\n this_particle['process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 8:\n this_particle['particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 7:\n this_particle['old_particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 6:\n this_particle['old_process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 5:\n this_particle['particle_class'] = line.strip()\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 4:\n this_particle['particle_position'] = np.array(\n [line[0:12], line[12:24], line[24:36]])\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 3:\n this_particle['idata_count'] = int(line)\n if this_particle['idata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particle_header_lines = particle_header_lines - 2\n elif particle_header_lines == 2:\n this_particle['particle_idata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+12] for i in xrange(0, len(line.rstrip('\\r\\n')), 12)]\n )\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 1:\n this_particle['rdata_count'] = int(line)\n if this_particle['rdata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particles.append(this_particle)\n particle_header_lines = 9\n elif particle_header_lines == 0:\n this_particle['particle_rdata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+14] for i in xrange(0, len(line.rstrip('\\r\\n')), 14)]\n )\n particles.append(this_particle)\n particle_header_lines = 9\n f.close()\n\n return header_data, particles",
"def load_copy(self, i):\n\n from . import load\n\n if self._dosort is not None:\n i = self._sorted_indices[i-1]\n\n f = util.open_(self._ahfBasename + 'particles')\n\n fpos = self._halos[i].properties['fstart']\n f.seek(fpos,0)\n ids = self._load_ahf_particle_block(f, nparts=self._halos[i].properties['npart'])\n\n f.close()\n\n return load(self.base.filename, take=ids)",
"def load_copy(self, i):\n if i<self._halo_min or i>=self._halo_max:\n raise KeyError, \"No such halo\"\n\n from . import load\n return load(self.base.filename, take=self._get_particles_for_halo(i))",
"def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n print \"Loading particles from h5 file: \", particles_file\r\n\r\n if myrank == 0:\r\n #h5 = tables.open_file(particles_file)\r\n h5 = h5py.File(particles_file)\r\n \r\n # use explicit int conversion otherwise there seems to\r\n # be a typepython->C++ type mismatch of numpy.int64->int\r\n #num_total_particles = int(h5.root.particles.shape[0])\r\n num_total_particles = int(h5['particles'].shape[0])\r\n \r\n if verbose:\r\n print \"Total of \", num_total_particles, \" particles from file\"\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n particles = h5['particles']\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particles.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm) \r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. #fix this quantity\r\n \r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particles[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n\r\n return bunch",
"def load_copy(self, i):\n if i>=len(self):\n raise KeyError, \"No such halo\"\n\n from . import load\n halo = load(self.base.filename, take=self._get_particles_for_halo(i))\n self._add_halo_id(halo, i)\n return halo",
"def load_from_file(self):\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x * size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x * size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x * size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x * size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x * size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x * size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x * size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x * size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x * size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x * size_sprite))\n # -tc- Le placement aléatoire des objets se fait bien une seule fois,\n # -tc- je ne vois pas de soucis ici\n self.objects_to_find = sample(self.paths, 3)\n # -tc- Ne pas utiliser print pour débugger mais un debugger\n print(self.paths)\n\n # -tc- return inutile et pas utilisé. Ce n'est pas comme cela qu'on procède pour retourner \n # -tc- plusieurs valeurs.\n return self.paths and self.wall0 and self.wall1 and self.wall2 and self.wall3 and self.wall4 and self.wall5 and self.wall6 and self.wall7 and self.wall8 and self.wall9 and self.objects_to_find and self.start and self.end",
"def __init__(self, filename, num_particles, max_iteration, maxFlip, maxTabuSize, w, c1, c2):\n #Read cnf formula from file\n self.clauses, self.num_literals, self.num_clauses = self.w_clauses_from_file(filename)\n\n #Parameters of PSO\n self.num_particles = num_particles\n self.max_iteration = max_iteration\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.max_flip = maxFlip\n\n #Tabu list parameters\n self.tabuList = []\n self.maxTabuSize = maxTabuSize\n\n #Initialize particles\n self.swarm = self.init_particles(self.num_particles, self.num_literals)\n\n #Initialize global best and it's fitness\n self.global_best = self.swarm[0].position\n self.global_best_fitness = self.fitness(self.global_best)",
"def __init__(self, *fname):\n # Atom positions, types and form factor table\n self.atom_pos = None # atom position -> N x 3 array, sorted based on atom type id\n # Index array saving indices that split atom_pos to get pos for each atom type\n # More specifically, let m = split_idx[i] and n = split_idx[i+1], then\n # atom_pos[m:n] contains all atoms for the ith atom type.\n self.split_idx = None\n self.num_atom_types = None # number of atom types\n self.ff_table = None # form factor table -> atom_type x qSample\n\n # Scattering\n self.q_sample = None # q vector sin(theta)/lambda\n self.num_q_samples = None # number of q samples\n # Compton scattering\n self.compton_q_sample = None # Compton: q vector sin(theta)/lambda\n self.num_compton_q_samples = 0 # number of Compton q samples\n self.sBound = None # Compton: static structure factor S(q)\n self.nFree = None # Compton: number of free electrons\n if len(fname) != 0:\n # read from pmi file to get info about radiation damage at a certain time slice\n if len(fname) == 1:\n datasetname = 'data/snp_0000001' # default dataset name -> set to be initial time\n self.read_h5file(fname[0], datasetname)\n elif len(fname) == 2:\n # both pmi file and the time slice (dataset) are provided\n self.read_h5file(fname[0], fname[1])\n else:\n raise ValueError('Wrong number of parameters to construct the particle object!')",
"def load_phantom(self,file_or_fname):\n pass",
"def make_from_file(filehandle):\n lines = filehandle.readlines()\n label = str(lines[0].rstrip('\\n'))\n mass = float(lines[1].rstrip('\\n'))\n position = list(lines[2].rstrip('\\n').split(','))\n velocity = list(lines[3].rstrip('\\n').split(','))\n particle = Particle3D(label=label, mass=mass, position=position, velocity=velocity)\n filehandle.close()\n return particle",
"def add_particles(P8gen, particles, data):\n for particle_id in particles:\n # Find particle in database (None: particle not found)\n particle = next((p for p in data['particles']\n if particle_id in [p['id'], p['name']]), None)\n if particle is None:\n raise ValueError(\"Could not find particle ID {0} in file {1}\"\n .format(particle, datafile))\n # Add the particle\n P8gen.SetParameters(particle['cmd'])",
"def read_txt_particles(particles_file, refpart, real_particles, bucket_length, comm, madx_format, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n if madx_format:\r\n print \"Loading madX particles from txt file: \", particles_file\r\n else:\r\n print \"Loading Synergia particles from txt file: \", particles_file\r\n\r\n if myrank == 0:\r\n particles = np.loadtxt(particles_file)\r\n num_total_particles = particles.shape[0]\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particles.shape[1] != 6) and (particles.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n \r\n if madx_format:\r\n # numpy manipulations to convert kinematics\r\n # convert MAD-X T=-c*dt to Synergia c*ct\r\n particles[:,4] = -particles[:,4]\r\n # convert MAD-X Delta-E/pc to Synergia delta-p/p\r\n # sqrt(((dE/p0c)+(E0/p0c))**2 - (m/p0c)**2) - (p0c/p0c)\r\n m_over_pc = pmass/p0c\r\n E_0_over_pc = E_0/p0c\r\n particles[:,5] = np.sqrt( (particles[:,5] + E_0_over_pc) *\r\n (particles[:,5] + E_0_over_pc) - m_over_pc**2 ) - 1.0\r\n \r\n\r\n # if there are no IDs, append particle ID column\r\n if particles.shape[1] != 7:\r\n particles_w_id = np.column_stack((particles,\r\n np.arange(num_total_particles, dtype='d')))\r\n else:\r\n particles_w_id = particles\r\n \r\n if myrank == 0:\r\n print \"Read \", num_total_particles, \" particles\"\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm)\r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. 
#fix this quantity\r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particles_w_id[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles_w_id[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n return bunch",
"def read_groups_particles(filename):\n \n f = open(filename,'r')\n\n Ntot = fromstring(f.read(4),int32)[0]\n Pos\t = fromstring(f.read(3*4*Ntot),float32)\n Pos.shape = (Ntot,3)\n f.close()\n \n return Pos",
"def load(file_name):\n ferme_fenetre()\n Hitori(file_name)",
"def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...",
"def load_single(self,inp, skip_ions=False):\n # Parse input\n if isinstance(inp,basestring):\n fil = inp\n elif isinstance(inp,tuple):\n field,gal_id = inp\n tmp = self.fits_path+'/'+field+'.'+gal_id+'.fits'\n fils = glob.glob(tmp)\n if len(fils) != 1:\n raise IOError('Bad field, gal_id: {:s}'.format(tmp))\n fil = fils[0]\n else:\n raise IOError('Bad input to load_single')\n\n # Read COS-Halos file\n print('cos_halos: Reading {:s}'.format(fil))\n hdu = fits.open(fil)\n summ = hdu[1].data\n galx = hdu[2].data\n self.cgm_abs.append( CGMSys(galx['ra'][0],\n galx['dec'][0],\n summ['zfinal'][0],\n galx['qsora'][0],\n galx['qsodec'][0], \n galx['zqso'][0]))\n mm = len(self.cgm_abs)-1\n # COS-Halos naming\n self.cgm_abs[mm].field = galx['field'][0]\n self.cgm_abs[mm].gal_id = galx['galid'][0]\n # Galxy properties\n self.cgm_abs[mm].galaxy.halo_mass = summ['LOGMHALO'][0] \n self.cgm_abs[mm].galaxy.stellar_mass = summ['LOGMFINAL'][0] \n self.cgm_abs[mm].galaxy.sfr = (galx['SFR_UPLIM'][0], galx['SFR'][0],\n galx['SFR_FLAG'][0]) # FLAG actually gives method used\n # Ions\n if skip_ions is True:\n return\n self.cgm_abs[mm].abs_sys.ions = IonClms()\n all_Z = []\n all_ion = []\n for jj in range(summ['nion'][0]):\n iont = hdu[3+jj].data\n if jj == 0: # Generate new Table\n dat_tab = Table(iont)\n else:\n try:\n dat_tab.add_row(Table(iont)[0])\n except:\n xdb.set_trace()\n all_Z.append(iont['zion'][0][0])\n all_ion.append(iont['zion'][0][1])\n # Add Z,ion\n dat_tab.add_column(Column(all_Z,name='Z'))\n dat_tab.add_column(Column(all_ion,name='ion'))\n # Set\n self.cgm_abs[mm].abs_sys.ions._data = dat_tab\n # NHI\n self.cgm_abs[mm].abs_sys.NHI = self.cgm_abs[mm].abs_sys.ions[(1,1)]['CLM']",
"def load_velo(self):\n # Find all the Velodyne files\n velo_path = os.path.join(self.sequence_path, 'velodyne', '*.bin')\n velo_files = sorted(glob.glob(velo_path))\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n velo_files = [velo_files[i] for i in self.frame_range]\n\n print('Found ' + str(len(velo_files)) + ' Velodyne scans...')\n\n # Read the Velodyne scans. Each point is [x,y,z,reflectance]\n self.velo = utils.load_velo_scans(velo_files)\n\n print('done.')",
"def parse_file(self):\n # the header was already read in the init, start at the first sample line\n\n for line in self._stream_handle:\n\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}\n data_dict = self._read_data(line)\n\n if GliderParser._has_science_data(data_dict, self._particle_class):\n # create the timestamp\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n # create the particle\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))",
"def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd",
"def read_periodic(ifile, periodic_dx):\n while 1:\n #line = lines.pop(0)\n line = ifile.readline()\n\n a = re.search(re_pfaces, line)\n if a:\n if not periodic_dx:\n periodic_face_map[int(a.group(3), 16)] = int(a.group(5), 16)\n continue\n break\n\n if not periodic_dx:\n keys = periodic_face_map.keys()\n vals = periodic_face_map.itervalues()\n for key, val in zip(keys, vals):\n periodic_face_map[val] = key",
"def main(file_list, outname, fit_func, starting_guess, chunk, hill):\n \n class StopWhile(Exception): pass\n\n # See if we want to analyze chunks\n if chunk: pHStatFile.my_re = pHStatFile.chunkre\n\n xdata = np.zeros(len(file_list))\n ydata = np.zeros(len(file_list))\n # Convert the file_list into a list of pHStatFile objects if it's not yet\n if type(file_list[0]).__name__ == 'str':\n tmp = [pHStatFile(open(fname, 'r')) for fname in file_list]\n file_list = tmp\n del tmp\n # Build the list of output files\n output_files = {}\n for resid in file_list[0].list_of_residues:\n output_files[resid] = open('%s_%s.dat' % (outname, resid), 'w', 0)\n \n # Generate the x-data (the pHs). This never changes\n for i, frec in enumerate(file_list): xdata[i] = frec.pH\n\n # Now loop through all of our data\n numres = 0 # Number of residues we've looped through so far\n numframes = 0 # Number of frames we've looped through so far\n\n # This is the easiest way to bust out of an infinite loop -- Engulf the whole\n # thing in a big try-except, and catch a specialized exception.\n try:\n while True:\n numres += 1\n # If we've looped through all of our residues, then we know we've hit\n # the next frame, so update our counters accordingly\n if numres % len(output_files) == 0:\n numframes += 1\n numres = 1\n # Zero out the y-data, because we're about to fill it up\n ydata = np.zeros(len(file_list)) # fraction protonated\n offset = np.zeros(len(file_list)) # Offset for pKa\n pred = np.zeros(len(file_list)) # Predicted pKas\n trans = [0 for i in range(len(file_list))] # num of transitions\n # Loop through all of the files and get our next residue -- they should\n # be synchronized, so this should pull the same residue from each file\n for i, frec in enumerate(file_list):\n stuff = frec.get_next_residue()\n # If we got nothing bust out of the loop\n if not stuff:\n raise StopWhile\n resname,resnum,offset[i],pred[i],ydata[i],trans[i] = stuff\n ydata[i] = 1-ydata[i] # Get fraction DEprotonated\n # Make the y-data into a hill-plottable form\n if fit_func:\n # If we're doing a hill plot, adjust our starting guess to be\n # relatively close -- hill will start as 1, and pKa will start\n # as the average of pKa values (not including infinity)\n if hill:\n starting_guess = (get_avg_pka(pred), 1)\n try:\n params, cov = optimize.curve_fit(fit_func, xdata,\n ydata, starting_guess)\n except (RuntimeError, ValueError):\n # If we can't fit the data (expected at the beginning) just go on\n continue\n line = '%d ' % numframes\n try:\n for i, param in enumerate(params):\n try:\n# line += '%.4f %.4f ' % (param, math.sqrt(cov[i][i]))\n line += '%.4f ' % param\n except TypeError:\n# line += '%.4f %.4f ' % (param, cov)\n line += '%.4f ' % param\n except ValueError:\n continue\n else:\n # Average all of the predicted pKas, ignoring values whose offset is\n # >= 3 pH units\n runsum = runsum2 = numpts = 0\n for i in range(len(file_list)):\n if abs(offset[i]) < 3:\n runsum += pred[i]\n runsum2 += pred[i] * pred[i]\n numpts += 1\n\n if numpts == 0: continue\n avg = runsum / numpts\n stdev = math.sqrt(abs(runsum2/numpts - avg*avg))\n line = '%d %.4f %.4f' % (numframes, avg, stdev)\n \n # Now write out the data as: Frame # pKa1 std.dev. [hill.coef. std.dev.]\n # but only write out if we actually got a pKa this time around\n ofile = output_files['%s_%d' % (resname, resnum)]\n ofile.write(line + os.linesep)\n\n except StopWhile: pass",
"def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]",
"def create_file_empty_particles( self, fullpath, iteration,\n time, dt, select_nglobal_dict=None ):\n # Create the file (can be done by one proc or in parallel)\n f = self.open_file( fullpath,\n parallel_open=self.write_metadata_parallel )\n\n # Setup the different layers of the openPMD file\n # (f is None if this processor does not participate is writing data)\n if f is not None:\n\n # Setup the attributes of the top level of the file\n self.setup_openpmd_file( f, iteration, time, dt )\n # Setup the meshes group (contains all the particles)\n f.attrs[\"particlesPath\"] = np.string_(\"particles/\")\n particle_path = \"/data/%d/particles/\" %iteration\n particle_grp = f.require_group(particle_path)\n # Loop through all particle species\n for species_name in sorted(self.species_dict.keys()):\n species = self.species_dict[species_name]\n\n # Check the number of particles to write\n if select_nglobal_dict is not None:\n N = select_nglobal_dict[species_name]\n else:\n N = None\n\n # Create and setup the h5py.Group species_grp\n species_path = particle_path+\"%s/\" %(species_name)\n species_grp = f.require_group( species_path )\n self.setup_openpmd_species_group( species_grp, species, N=N )\n\n # Loop over the different quantities that should be written\n # and setup the corresponding datasets\n for particle_var in self.particle_data:\n\n # Vector quantities\n if particle_var in [\"position\", \"momentum\", \"E\", \"B\"]:\n # Setup the dataset\n quantity_path=species_path+ \"%s/\" %particle_var\n quantity_grp = f.require_group(quantity_path)\n for coord in [\"x\",\"y\",\"z\"]:\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = quantity_grp.create_dataset(\n coord, (N,), dtype='f8')\n else:\n dset = quantity_grp.create_dataset(\n coord, (0,), maxshape=(None,), dtype='f8')\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( quantity_grp,\n particle_var)\n\n # Scalar quantity\n elif particle_var in [\"weighting\", \"id\", \"t\"]:\n # Choose the type of the output\n if particle_var == \"id\":\n dtype = 'uint64'\n else:\n dtype = 'f8'\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = species_grp.create_dataset(\n particle_var, (N,), dtype=dtype )\n else:\n dset = species_grp.create_dataset( particle_var,\n (0,), maxshape=(None,), dtype=dtype)\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( dset, particle_var )\n\n # Unknown field\n else:\n raise ValueError(\n \"Invalid string in particletypes: %s\" %particle_var)\n\n # Close the file\n f.close()",
"def _get_file_positions(self,filename):\n if os.path.exists(self._ahfBasename + 'fpos'):\n f = util.open_(self._ahfBasename + 'fpos')\n for i in range(self._nhalos):\n self._halos[i+1].properties['fstart'] = int(f.readline())\n f.close()\n else:\n f = util.open_(filename)\n for h in xrange(self._nhalos):\n if len((f.readline().split())) == 1:\n f.readline()\n self._halos[h+1].properties['fstart'] = f.tell()\n for i in xrange(self._halos[h+1].properties['npart']):\n f.readline()\n f.close()",
"def LoadBatch(filename):",
"def read_opal(self, file_name, step_number=None, species_name = 'Species'):\n \n # opal coordinates are as follows:\n # x -- horizontal offset, m\n # xp -- horizontal momentum, beta*gamma\n # y -- vertical offset, m\n # yp -- vertical momentum, beta*gamma\n # z -- Position relative to ? (some sort of reference), m\n # p -- total momentum, beta*gamma\n # TODO: We really need to be able to pull from screens too but we'll settle for standard distribution output for now\n # TODO: We don't currently handle particle specific weight/charge in Species\n \n with h5.File(file_name, 'r') as pcdata:\n if not step_number:\n step_number = len(pcdata) - 1\n loc = 'Step#{}'.format(step_number)\n mp_count = pcdata[loc+'/z'].shape[0]\n particle_data = np.empty((mp_count, 6))\n for i, coord in enumerate(['x', 'px', 'y', 'py', 'z', 'pz']):\n print(loc+'/'+coord)\n particle_data[:, i] = pcdata[loc+'/'+coord]\n total_charge = pcdata[loc].attrs['CHARGE']\n \n # TODO: This needs to be a function --- it is used many times\n # probably just make a species instantiation function\n if species_name == 'Species':\n spec_name = species_name+'_'+str(len(self.species.keys()))\n else:\n spec_name = species_name\n\n # TODO: This shouldn't be specific to electrons\n self.species[spec_name] = Species(particle_data, charge=-1, mass=0.511e6, total_charge=total_charge)\n \n return 0",
"def Load_File(file_list_pos):\n\t#Check size of input file\n\tfile_info=os.stat(file_list_pos)\n\tif file_info.st_size >= 250000000:\n\t\tprint \"\\n***WARNING: FILE SIZE EXCEEDS 250 MB***\" \n\t\tprint \"CONSIDER USING --rnd_sample TO SPEED UP PROCESSING\"\n\t\n\t#Opens the binned ld file\n\tldFile = open(file_list_pos)\n\t#Loads the distance between the pairs as well as the different linkage statistics into numpy nd arrays\n\tBPDist,r2Pear,D,DPrime,r2GLS=np.loadtxt(ldFile, usecols=(2,3,4,5,6), unpack=True)\n\n\t#Sets the x to the distance between pairs and the response data to the r^2 value (can also change data to D, DPrime, and r2GLS)\n\tx=BPDist\n\t\n\t#Choose data type matching specified option\n\tif args.data_type == 'r2Pear':\n\t\tdata=r2Pear\n\telif args.data_type == 'D':\n\t\tdata=D\n\telif args.data_type == 'DPrime':\n\t\tdata=DPrime\n\telif args.data_type == 'r2GLS':\n\t\tdata=r2GLS\n\tAxis_Data=[x,data]\n\treturn Axis_Data",
"def read_particle( filename, species, quantity ) :\n # Translate the quantity to the OpenPMD format\n dict_quantity = { 'x' : 'position/x',\n 'y' : 'position/y',\n 'z' : 'position/z',\n 'ux' : 'momentum/x',\n 'uy' : 'momentum/y',\n 'uz' : 'momentum/z',\n 'w' : 'weighting'}\n if quantity in dict_quantity:\n opmd_quantity = dict_quantity[quantity]\n else:\n opmd_quantity = quantity\n\n # Open the HDF5 file\n dfile = h5py.File( filename, 'r' )\n base_path = get_bpath( dfile )\n particles_path = dfile.attrs['particlesPath'].decode()\n\n # Find the right dataset\n species_grp = dfile[ os.path.join( base_path, particles_path, species ) ]\n data = get_data( species_grp[ opmd_quantity ] )\n\n # - Return positions in microns, with an offset\n if quantity in ['x', 'y', 'z']:\n offset = get_data( species_grp[ 'positionOffset/%s' %quantity ] )\n data = 1.e6 * (data + offset)\n # - Return momentum in normalized units\n elif quantity in ['ux', 'uy', 'uz' ]: \n norm_factor = 1./( get_data( species_grp['mass'] ) * constants.c )\n data = data * norm_factor\n\n # Close the HDF5 file and return the data\n dfile.close()\n return( data )",
"def load(self, file):\n with open(file) as f:\n for x in f:\n #print(int(x[0:8], 2))\n self.ram[self.pc] = int(x[0:8], 2)\n self.pc += 1"
] |
[
"0.6476038",
"0.62456244",
"0.62420654",
"0.5975579",
"0.5904623",
"0.58064884",
"0.5734992",
"0.5640551",
"0.56393373",
"0.5608404",
"0.5608129",
"0.5560564",
"0.55414915",
"0.55377823",
"0.5471918",
"0.5459634",
"0.5459327",
"0.5428468",
"0.54155725",
"0.540176",
"0.53990984",
"0.5389474",
"0.538894",
"0.5361821",
"0.5347662",
"0.5325798",
"0.5281149",
"0.527696",
"0.5276595",
"0.52752495"
] |
0.66399556
|
0
|
Write the (AHF) halo catalog to disk. This is really a wrapper that calls writegrp, writetipsy and writestat. It writes the .amiga.grp file (ascii group ids), the .amiga.stat file (ascii halo catalog) and the .amiga.gtp file (tipsy halo catalog). By default the output file base name is the same as that of the snapshot s. The function returns a SimSnap of the halo catalog.
|
def writehalos(self, snapshot, halos, hubble=None, outfile=None):
s = snapshot
grpoutfile = s.filename + ".amiga.grp"
statoutfile = s.filename + ".amiga.stat"
tipsyoutfile = s.filename + ".amiga.gtp"
halos.writegrp(s, halos, grpoutfile)
halos.writestat(s, halos, statoutfile, hubble=hubble)
shalos = halos.writetipsy(s, halos, tipsyoutfile, hubble=hubble)
return shalos
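A hypothetical call pattern for the wrapper (the loader calls and object names below are assumptions, not part of this snippet): load a snapshot, build its AHF halo catalogue, and let writehalos emit the .amiga.grp, .amiga.stat and .amiga.gtp files next to the snapshot.

def write_amiga_outputs(snapshot_filename):
    # Assumes a pynbody-style API: load() gives a snapshot and .halos() gives
    # an AHF catalogue object that carries the writehalos wrapper above.
    import pynbody
    s = pynbody.load(snapshot_filename)
    h = s.halos()
    return h.writehalos(s, h, hubble=s.properties['h'])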
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def writegrp(self, grpoutfile=False):\n snapshot = self[1].ancestor\n try:\n snapshot['grp']\n except:\n self.make_grp()\n if not grpoutfile:\n grpoutfile = snapshot.filename + '.grp'\n logger.info(\"Writing grp file to %s\" % grpoutfile)\n fpout = open(grpoutfile, \"w\")\n print >> fpout, len(snapshot['grp'])\n\n # writing 1st to a string sacrifices memory for speed.\n # but this is much faster than numpy.savetxt (could make an option).\n # it is assumed that max halo id <= nhalos (i.e.length of string is set\n # len(str(nhalos))\n stringarray = snapshot['grp'].astype(\n '|S' + str(len(str(self._nhalos))))\n outstring = \"\\n\".join(stringarray)\n print >> fpout, outstring\n fpout.close()",
"def writestat(self, snapshot, halos, statoutfile, hubble=None):\n s = snapshot\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n outfile = statoutfile\n logger.info(\"Writing stat file to %s\" % statoutfile)\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n nhalos = halos._nhalos\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties # halo index starts with 1 not 0\n # 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(halos[ii + 1].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n # may want to add implement satellite test and false central\n # breakup test.\n\n n_dark = h['npart'] - h['n_gas'] - h['n_star']\n M_dark = h['mass'] - h['M_gas'] - h['M_star']\n ss = \" \" # can adjust column spacing\n outstring = str(int(h['halo_id'])) + ss\n outstring += str(int(h['npart'])) + ss + str(int(h['n_gas'])) + ss\n outstring += str(int(h['n_star'])) + ss + str(int(n_dark)) + ss\n outstring += str(h['mass'] / hubble) + ss + \\\n str(h['Rvir'] / hubble) + ss\n outstring += str(h['M_gas'] / hubble) + ss + \\\n str(h['M_star'] / hubble) + ss\n outstring += str(M_dark / hubble) + ss\n outstring += str(h['Vmax']) + ss + str(h['Rmax'] / hubble) + ss\n outstring += str(h['sigV']) + ss\n # pos: convert kpc/h to mpc (no h).\n outstring += str(h['Xc'] / hubble / 1000.) + ss\n outstring += str(h['Yc'] / hubble / 1000.) + ss\n outstring += str(h['Zc'] / hubble / 1000.) + ss\n outstring += str(h['VXc']) + ss + \\\n str(h['VYc']) + ss + str(h['VZc']) + ss\n outstring += contam + ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\" + ss # false central breakup.\n print >> fpout, outstring\n fpout.close()\n return 1",
"def writestat(self, outfile=None, hubble=None):\n s = self._base()\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n if outfile is None: outfile = self._base().filename+'.stat'\n print \"write stat file to \", outfile\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n for ii in np.arange(self._nhalos)+1:\n print '%d '%ii,\n sys.stdout.flush()\n h = self[ii].properties # halo index starts with 1 not 0\n## 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(self[ii].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n## may want to add implement satellite test and false central breakup test.\n ss = \" \" # can adjust column spacing\n outstring = str(ii)+ss\n outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss\n outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss\n outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss\n outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss\n outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss\n outstring += str(h['vrms'])+ss\n ## pos: convert kpc/h to mpc (no h).\n outstring += str(h['pos'][0][0]/hubble)+ss\n outstring += str(h['pos'][0][1]/hubble)+ss\n outstring += str(h['pos'][0][2]/hubble)+ss\n outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss\n outstring += str(h['vel'][0][2])+ss\n outstring += contam+ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\"+ss # false central breakup.\n print >> fpout, outstring\n fpout.close()",
"def writetipsy(self, snapshot, halos, tipsyoutfile, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = snapshot\n outfile = tipsyoutfile\n nhalos = halos._nhalos\n nstar = nhalos\n sout = new(star=nstar) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None:\n hubble = s.properties['h']\n sout.properties['h'] = hubble\n # ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc * sout.properties['h']\n lboxmpch = lboxkpc * sout.properties['h'] / 1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8. / 3.) ** .5\n tipsymunitmsun = rhocrithhco * lboxmpch ** 3 / sout.properties['h']\n\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties\n sout.star[ii]['mass'] = h['mass'] / hubble / tipsymunitmsun\n # tipsy units: box centered at 0. (assume 0<=x<=1)\n sout.star[ii]['x'] = h['Xc'] / lboxkpch - 0.5\n sout.star[ii]['y'] = h['Yc'] / lboxkpch - 0.5\n sout.star[ii]['z'] = h['Zc'] / lboxkpch - 0.5\n sout.star[ii]['vx'] = h['VXc'] / tipsyvunitkms\n sout.star[ii]['vy'] = h['VYc'] / tipsyvunitkms\n sout.star[ii]['vz'] = h['VZc'] / tipsyvunitkms\n sout.star[ii]['eps'] = h['Rvir'] / lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout",
"def generate_hdf(sav_file, instr, lamps, outfil, dtoler=0.6):\n from pypit import pyputils\n msgs = pyputils.get_dummy_logger()\n\n from pypit import arwave\n from pypit import arutils\n arutils.dummy_settings()\n #\n from arclines.pypit_utils import find_peaks\n from arclines.io import load_line_lists\n #\n\n # Read IDL save file\n sav_file = os.getenv('LONGSLIT_DIR')+'calib/linelists/'+sav_file\n s = readsav(sav_file)\n ctbl = Table(s['calib']) # For writing later\n\n # Line list\n alist = load_line_lists(lamps)\n\n # One spectrum?\n ashape = s['archive_arc'].shape\n if len(ashape) == 1:\n nspec = 1\n npix = ashape[0]\n else:\n nspec = s['archive_arc'].shape[0]\n npix = ashape[1]\n\n # Meta data\n mdict = dict(npix=npix, instr=instr,\n lamps=[str(ilamp) for ilamp in lamps], # For writing to hdf5\n nspec=nspec, infil=sav_file, IDairvac='vac')\n print(\"Processing {:d} spectra in {:s}\".format(mdict['nspec'], sav_file))\n\n # Start output\n outh5 = h5py.File(out_path+outfil, 'w')\n outh5.create_group('arcs')\n\n # Loop on spectra\n for ss in range(mdict['nspec']):\n sss = str(ss)\n # Parse\n if nspec == 1:\n spec = s['archive_arc']\n else:\n spec = s['archive_arc'][ss]\n calib = s['calib'][ss]\n # Peaks\n tampl, tcent, twid, w, yprep = find_peaks(spec)\n pixpk = tcent[w]\n pixampl = tampl[w]\n\n # Wavelength solution\n try:\n cfunc = calib['func'].decode('UTF-8')\n except:\n cfunc = calib['func']\n if cfunc == 'CHEBY':\n wv_air = cheby_val(calib['ffit'], np.arange(mdict['npix']),\n calib['nrm'], calib['nord'])\n elif cfunc == 'POLY':\n wv_air = poly_val(calib['ffit'], np.arange(mdict['npix']),\n calib['nrm'])\n else:\n pdb.set_trace()\n raise ValueError(\"Bad calib\")\n # Check blue->red or vice-versa\n if ss == 0:\n if wv_air[0] > wv_air[-1]:\n mdict['bluered'] = False\n else:\n mdict['bluered'] = True\n\n # Peak waves\n if calib['func'] == 'CHEBY':\n twave_air = cheby_val(calib['ffit'], pixpk,\n calib['nrm'], calib['nord'])\n else:\n twave_air = poly_val(calib['ffit'], pixpk, calib['nrm'])\n # Air to Vac\n twave_vac = arwave.airtovac(twave_air*u.AA)\n wave_vac = arwave.airtovac(wv_air*u.AA)\n if ss == 0:\n disp = np.median(np.abs(wave_vac-np.roll(wave_vac,1)))\n print(\"Average dispersion = {:g}\".format(disp))\n # IDs\n idwv = np.zeros_like(pixpk)\n idsion = np.array([str('12345')]*len(pixpk))\n for kk,twv in enumerate(twave_vac.value):\n # diff\n diff = np.abs(twv-alist['wave'])\n if np.min(diff) < dtoler:\n imin = np.argmin(diff)\n idwv[kk] = alist['wave'][imin]\n #idsion[kk] = alist['Ion'][imin] NIST\n idsion[kk] = alist['ion'][imin]\n # Red to blue?\n if mdict['bluered'] is False:\n pixpk = mdict['npix']-1 - pixpk\n # Re-sort\n asrt = np.argsort(pixpk)\n pixpk = pixpk[asrt]\n idwv = idwv[asrt]\n # Reverse\n spec = spec[::-1]\n wave_vac = wave_vac[::-1]\n # Output\n outh5['arcs'].create_group(sss)\n # Datasets\n outh5['arcs'][sss]['wave'] = wave_vac\n outh5['arcs'][sss]['wave'].attrs['airvac'] = 'vac'\n outh5['arcs'][sss]['spec'] = spec\n outh5['arcs'][sss]['spec'].attrs['flux'] = 'counts'\n outh5['arcs'][sss]['pixpk'] = pixpk\n outh5['arcs'][sss]['ID'] = idwv\n outh5['arcs'][sss]['ID'].attrs['airvac'] = 'vac'\n outh5['arcs'][sss]['Ion'] = str(idsion)\n # LR wavelengths\n outh5['arcs'][sss]['LR_wave'] = wv_air\n outh5['arcs'][sss]['LR_wave'].attrs['airvac'] = 'air'\n # LR Fit\n outh5['arcs'][sss].create_group('LR_fit')\n for key in ctbl.keys():\n outh5['arcs'][sss]['LR_fit'][key] = ctbl[ss][key]\n\n # Meta data\n outh5.create_group('meta')\n for key in mdict.keys():\n try:\n 
outh5['meta'][key] = mdict[key]\n except TypeError: # Probably a unicode thing\n if isinstance(mdict[key], list):\n if isinstance(mdict[key][0], basestring):\n tmp = [bytes(item, 'utf-8') for item in mdict[key]]\n else:\n tmp = mdict[key]\n elif isinstance(mdict[key], basestring):\n tmp = str(mdict[key])\n try:\n outh5['meta'][key] = tmp\n except TypeError:\n pdb.set_trace()\n # Close\n outh5.close()\n print('Wrote {:s}'.format(out_path+outfil))",
"def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()",
"def writetipsy(self, outfile=None, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = self._base()\n if outfile is None: outfile = s.filename+'.gtp'\n print \"write tipsy file to \", outfile\n sout = new(star=self._nhalos) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None: hubble = s.properties['h']\n sout.properties['h'] = hubble\n ### ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc*sout.properties['h']\n lboxmpch = lboxkpc*sout.properties['h']/1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8./3.)**.5\n tipsymunitmsun = rhocrithhco * lboxmpch**3 / sout.properties['h']\n\n print \"transforming \", self._nhalos, \" halos into tipsy star particles\"\n for ii in xrange(self._nhalos):\n h = self[ii+1].properties\n sout.star[ii]['mass'] = h['m']/hubble / tipsymunitmsun\n ## tipsy units: box centered at 0. (assume 0<=x<=1)\n sout.star[ii]['x'] = h['pos'][0][0]/lboxmpch - 0.5\n sout.star[ii]['y'] = h['pos'][0][1]/lboxmpch - 0.5\n sout.star[ii]['z'] = h['pos'][0][2]/lboxmpch - 0.5\n sout.star[ii]['vx'] = h['vel'][0][0]/tipsyvunitkms\n sout.star[ii]['vy'] = h['vel'][0][1]/tipsyvunitkms\n sout.star[ii]['vz'] = h['vel'][0][2]/tipsyvunitkms\n sout.star[ii]['eps'] = h['r']/lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n print \"writing tipsy outfile %s\"%outfile\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout",
"def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)",
"def writeto(self, save_to_path, method='ascii',\n\t\ttell_sp=None):\n\t\t#pixel = np.delete(np.arange(1024),list(self.mask))\n\t\tpixel = np.arange(len(self.oriWave))\n\t\t## create the output mask array 0=good; 1=bad\n\t\tif (self.apply_sigma_mask) or (self.mask != []):\n\t\t\tmask = np.zeros((len(self.oriWave),),dtype=int)\n\t\t\tnp.put(mask,self.mask,int(1))\n\t\telse:\n\t\t\tmask = np.zeros((len(self.oriWave),),dtype=int)\n\n\t\tif method == 'fits':\n\t\t\t#fullpath = self.path + '/' + self.name + '_' + str(self.order) + '_all.fits'\n\t\t\t#hdulist = fits.open(fullpath, ignore_missing_end=True)\n\t\t\t#hdulist.writeto(save_to_path)\n\t\t\t#hdulist.close()\n\t\t\tif self.header['NAXIS1'] == 1024:\n\t\t\t\tsave_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\\\n\t\t\t\t+ '_O' + str(self.order)\n\t\t\telse:\n\t\t\t\tsave_to_path2 = save_to_path + self.header['OFNAME'].split('.')[0]\\\n\t\t\t\t+ '_O' + str(self.order)\n\t\t\t## wavelength\n\t\t\thdu1 = fits.PrimaryHDU(self.wave/10000, header=self.header)\n\t\t\tsave_to_path2_1 = save_to_path2 + '_wave.fits'\n\t\t\thdu1.writeto(save_to_path2_1)\n\t\t\t## flux\n\t\t\thdu2 = fits.PrimaryHDU(self.flux, header=self.header)\n\t\t\tsave_to_path2_2 = save_to_path2 + '_flux.fits'\n\t\t\thdu2.writeto(save_to_path2_2)\n\t\t\t## uncertainty\n\t\t\thdu3 = fits.PrimaryHDU(self.noise, header=self.header)\n\t\t\tsave_to_path2_3 = save_to_path2 + '_uncertainty.fits'\n\t\t\thdu3.writeto(save_to_path2_3)\n\t\t\t## pixel\n\t\t\thdu4 = fits.PrimaryHDU(pixel, header=self.header)\n\t\t\tsave_to_path2_4 = save_to_path2 + '_pixel.fits'\n\t\t\thdu4.writeto(save_to_path2_4)\n\t\t\t## mask\n\t\t\thdu5 = fits.PrimaryHDU(mask, header=self.header)\n\t\t\tsave_to_path2_5 = save_to_path2 + '_mask.fits'\n\t\t\thdu5.writeto(save_to_path2_5)\n\n\t\t\tif tell_sp is not None:\n\t\t\t\ttell_sp2 = copy.deepcopy(tell_sp)\n\t\t\t\t# the telluric standard model\n\t\t\t\twavelow = tell_sp2.wave[0] - 20\n\t\t\t\twavehigh = tell_sp2.wave[-1] + 20\n\t\t\t\ttell_mdl = smart.getTelluric(wavelow=wavelow,wavehigh=wavehigh)\n\t\t\t\t# continuum correction for the data\n\t\t\t\ttell_sp2 = smart.continuumTelluric(data=tell_sp2, \n\t\t\t\t\tmodel=tell_mdl,order=tell_sp2.order)\n\t\t\t\t# telluric flux\n\t\t\t\thdu6 = fits.PrimaryHDU(tell_sp.flux, header=tell_sp.header)\n\t\t\t\tsave_to_path2_6 = save_to_path2 + '_telluric_flux.fits'\n\t\t\t\thdu5.writeto(save_to_path2_6)\n\t\t\t\t# telluric uncertainty\n\t\t\t\thdu7 = fits.PrimaryHDU(tell_sp.noise, header=tell_sp.header)\n\t\t\t\tsave_to_path2_7 = save_to_path2 + '_telluric_uncertainty.fits'\n\t\t\t\thdu5.writeto(save_to_path2_7)\n\t\t\t\t# telluric model\n\t\t\t\thdu8 = fits.PrimaryHDU(tell_mdl.flux, header=tell_sp.header)\n\t\t\t\tsave_to_path2_8 = save_to_path2 + '_telluric_model.fits'\n\t\t\t\thdu5.writeto(save_to_path2_8)\n\t\t\t\t\n\n\t\telif method == 'ascii':\n\t\t\tif '.txt' not in save_to_path:\n\t\t\t\tif self.header['NAXIS1'] == 1024:\n\t\t\t\t\tsave_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\\\n\t\t\t\t\t+ '_O' + str(self.order) + '.txt'\n\t\t\t\telse:\n\t\t\t\t\tsave_to_path2 = save_to_path + self.header['OFNAME'].split('.')[0]\\\n\t\t\t\t\t+ '_O' + str(self.order) + '.txt'\n\t\t\telse:\n\t\t\t\tsave_to_path2 = save_to_path\n\n\t\t\tif tell_sp is None:\n\t\t\t\tdf = 
pd.DataFrame(data={'wavelength':list(self.oriWave/10000),\n\t\t\t\t\t'flux':list(self.oriFlux),\n\t\t\t\t\t'uncertainty':list(self.oriNoise),\n\t\t\t\t\t'pixel':list(pixel),\n\t\t\t\t\t'mask':list(mask)})\n\t\t\t\tdf.to_csv(save_to_path2, index=None, sep='\\t', mode='a',\n\t\t\t\t\theader=True, columns=['wavelength', 'flux', 'uncertainty',\n\t\t\t\t\t'pixel', 'mask'])\n\t\t\t\n\t\t\telif tell_sp is not None:\n\t\t\t\ttell_sp2 = copy.deepcopy(tell_sp)\n\t\t\t\ttell_sp2 = smart.continuumTelluric(data=tell_sp2\n\t\t\t\t\t,order=self.order)\n\t\t\t\tlsf0 = smart.getLSF(tell_sp2)\n\t\t\t\ttell_sp2.flux = tell_sp2.oriFlux\n\t\t\t\ttell_sp2.wave = tell_sp2.oriWave\n\t\t\t\ttell_mdl = smart.convolveTelluric(lsf0, tell_sp2)\n\n\t\t\t\tprint(len(self.oriWave), len(self.oriFlux), len(self.oriNoise), len(tell_sp.oriFlux),\n\t\t\t\t\tlen(tell_sp.oriNoise), len(tell_mdl.flux), len(pixel), len(mask))\n\n\t\t\t\tdf = pd.DataFrame(data={'wavelength':list(self.oriWave/10000),\n\t\t\t\t\t'flux':list(self.oriFlux),\n\t\t\t\t\t'uncertainty':list(self.oriNoise),\n\t\t\t\t\t'telluric_flux':list(tell_sp.oriFlux),\n\t\t\t\t\t'telluric_uncertainty':list(tell_sp.oriNoise),\n\t\t\t\t\t'telluric_model':list(tell_mdl.flux),\n\t\t\t\t\t'pixel':list(pixel),\n\t\t\t\t\t'mask':list(mask)})\n\n\n\t\t\t\tdf.to_csv(save_to_path2, index=None, sep='\\t', mode='a',\n\t\t\t\t\theader=True, columns=['wavelength', 'flux', 'uncertainty', \n\t\t\t\t\t'telluric_flux', 'telluric_uncertainty', 'telluric_model',\n\t\t\t\t\t'pixel', 'mask'])",
"def save(self, file):\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()",
"def save(self, ofilename, oname, noisy_only = True):\n ofile = ROOT.TFile(ofilename, 'recreate')\n\n outhists = [h.Clone(oname % (i + 1)) for i, h in enumerate(self.modules)]\n for h, cells in zip(outhists, self.cells):\n if noisy_only: h.Reset()\n for cell in cells: h.SetBinContent(cell[0], cell[1], noisy_only * 1.)\n # h.Write()\n\n ofile.Write()\n ofile.Close()",
"def write_database(data,database,dataout,name):\n\n if not os.path.exists(database):\n output = FileTools.safe_hdf5_open(database,'w')\n else:\n output = FileTools.safe_hdf5_open(database,'a')\n\n obsid = BaseClasses.DataStructure.getObsID(data)\n if obsid in output:\n grp = output[obsid]\n else:\n grp = output.create_group(obsid)\n\n if name in grp:\n del grp[name]\n stats = grp.create_group(name)\n\n #for i in range(nBands):\n # if isinstance(self.avg_map_fits[i],type(None)):\n # continue\n # dnames += [f'Avg_Values_Band{i}',f'Avg_Errors_Band{i}']\n # dsets += [self.avg_map_fits[i]['Values'],self.avg_map_fits[i]['Errors']]\n\n #for (dname, dset) in zip(dnames, dsets):\n for dname, dset in dataout.items(): \n if dname in stats:\n del stats[dname]\n stats.create_dataset(dname, data=dset)\n output.close()",
"def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)",
"def save_Omg(self, infodir='auto', histogram=True, attribute=True, force_hdf5=False, overwrite=False):\n if infodir == 'auto' or infodir is None:\n infodir = dio.prepdir(self.lattice.lp['meshfn'])\n if self.Omg is not None:\n pinning_name = self.get_pinmeshfn_exten()\n # When running jobs in series (NOT in parallel), can save pinning directly to hdf5\n if force_hdf5:\n h5fn = dio.prepdir(self.lp['meshfn']) + 'omg_configs.hdf5'\n if glob.glob(h5fn):\n rw = \"r+\"\n else:\n rw = \"w\"\n\n with h5py.File(h5fn, rw) as fi:\n keys = fi.keys()\n # is this pinning configuration already in the hdf5 file?\n if pinning_name not in keys:\n # add pinning to the hdf5 file\n print 'saving pinning in hdf5...'\n fi.create_dataset(pinning_name, shape=np.shape(self.Omg), data=self.Omg, dtype='float')\n elif overwrite:\n data = fi[pinning_name] # load the data\n data[...] = self.Omg # assign new values to data\n else:\n raise RuntimeError('Pinning config already exists in hdf5, exiting...')\n else:\n # Otherwise perform standard save of a text file for the pinning configuration\n print 'saving pinning in txt...'\n fn = dio.prepdir(self.lp['meshfn']) + pinning_name + '.txt'\n np.savetxt(fn, self.Omg, header=\"Pinning frequencies Omg\")\n if histogram:\n plt.clf()\n fig, hist_ax = leplt.initialize_histogram(self.Omg, xlabel=r'Pinning frequencies, $\\Omega_g$')\n histfn = 'Omg_hist_mean' + sf.float2pstr(self.lp['Omg']) + self.lp['meshfn_exten']\n plt.savefig(infodir + histfn + '.png')\n plt.clf()\n print 'Saved Omg to ' + fn\n else:\n raise RuntimeError('self.Omg is None, so cannot save it!')",
"def save_to_hdf5(fname, df, cosmo={}, tname=\"RockstarMergerTrees\", min_vmax=0):\n f = h5py.File(fname, 'a', libver='latest')\n colheads = df.columns.values\n treenums = df.loc[df.vmax >= min_vmax].tree.unique()\n if tname in f.keys():\n print(\"File already contains a group named {0}, so I can't save to it.\"\n \" Exiting.\".format(tname))\n sys.exit(1337)\n t = f.create_group(tname)\n if HAVE_PBAR:\n treenums = tqdm(treenums, desc='Saving')\n for i, tnum in enumerate(treenums):\n tg = t.create_group('Tree_' + str(tnum))\n for j, col in enumerate(colheads):\n col_data = df.loc[(df.tree == tnum), col].values\n tg.create_dataset(col, data=col_data)\n head = f.create_group('Header')\n for param in cosmo:\n head.create_dataset(param, data=cosmo[param])\n f.close()",
"def write(self, ext_file_action=ExtFileAction.copy_relative_paths):\n if self.simulation_data.auto_set_sizes:\n self._update_size_defs()\n\n # create any folders in path\n package_file_path = self.get_file_path()\n package_folder = os.path.split(package_file_path)[0]\n if package_folder and not os.path.isdir(package_folder):\n os.makedirs(os.path.split(package_file_path)[0])\n\n # open file\n fd = open(package_file_path, \"w\")\n\n # write flopy header\n if self.simulation_data.write_headers:\n dt = datetime.datetime.now()\n header = (\n \"# File generated by Flopy version {} on {} at {}.\"\n \"\\n\".format(\n __version__,\n dt.strftime(\"%m/%d/%Y\"),\n dt.strftime(\"%H:%M:%S\"),\n )\n )\n fd.write(header)\n\n # write blocks\n self._write_blocks(fd, ext_file_action)\n\n fd.close()",
"def Writefile(self, outfile, verbose=True):\n \n self.outfile = outfile\n \n # Write SUNTANS grid to file\n nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')\n nc.Description = 'SUNTANS subsetted history file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n nc.type = 'SUNTANS HIS file'\n #pdb.set_trace()\n nc.createDimension('Nc', self.Nc)\n nc.createDimension('Np', self.Np)\n nc.createDimension('Ne', self.Ne)\n nc.createDimension('Nk', self.Nk)\n nc.createDimension('numsides', self.numsides)\n \n nc.createDimension('time', None)\n \n def write_nc_var(var, name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n if verbose:\n print ' ... wrote ', name\n \n def create_nc_var(name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n if verbose:\n print ' ... wrote ', name\n \n # Grid variables\n write_nc_var(self.xv, 'xv', ('Nc'))\n write_nc_var(self.yv, 'yv', ('Nc'))\n write_nc_var(self.xp, 'xp', ('Np'))\n write_nc_var(self.yp, 'yp', ('Np'))\n write_nc_var(self.xe, 'xe', ('Ne'))\n write_nc_var(self.ye, 'ye', ('Ne'))\n write_nc_var(self.dz, 'dz', ('Nk'))\n write_nc_var(self.dv, 'dv', ('Nc'))\n write_nc_var(self.Ac, 'Ac', ('Nc'))\n write_nc_var(self.Nk, 'Nk', ('Nc'))\n write_nc_var(self.face, 'face', ('Nc','numsides'))\n write_nc_var(self.mark, 'mark', ('Ne'))\n write_nc_var(self.cells, 'cells', ('Nc','numsides'))\n \n \n # Create the data variables\n create_nc_var('time',('time'),'seconds since 1990-01-01 00:00:00')\n create_nc_var('salt',('time','Nk','Nc'),'psu')\n create_nc_var('temp',('time','Nk','Nc'),'degrees C')\n create_nc_var('uc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('vc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('nu_v',('time','Nk','Nc'),'m2 s-1')\n create_nc_var('rho',('time','Nk','Nc'),'kg m-3')\n create_nc_var('tau_x',('time','Nc'),'N m-2')\n create_nc_var('tau_y',('time','Nc'),'N m-2')\n create_nc_var('eta',('time','Nc'),'m')\n \n nc.close()",
"def save_output(self):\n # Auxiliary functions\n def intro(otype, suffix):\n self.logprint(\"Saving {}...\".format(otype))\n dirname = os.path.join(self.outpath,\\\n self.conf[\"output_prefix\"] + \"_files/{}\".format(suffix))\n if os.path.exists(dirname): # Overwrite existing output\n shutil.rmtree(dirname)\n os.makedirs(dirname)\n return(dirname)\n def save(obj, filename):\n try:\n f = open(filename, \"wb\")\n pickle.dump(obj, f)\n finally:\n f.close()\n def outro(otype): self.logprint(\"{} saved.\".format(otype).capitalize())\n # Saving output\n if self.conf[\"output_mode\"] >= 2: # Save all snapshot pops\n dirname = intro(\"snapshot populations\", \"populations/snapshots\")\n for n in xrange(self.conf[\"n_runs\"]):\n for m in xrange(self.conf[\"n_snapshots\"]):\n pop = self.runs[n].record[\"snapshot_pops\"][m]\n filename = dirname + \"/run{0}_s{1}.pop\".format(n,m)\n save(pop, filename)\n del self.runs[n].record[\"snapshot_pops\"]\n outro(\"snapshot populations\")\n if self.conf[\"output_mode\"] >= 1: # Save final populations\n dirname = intro(\"final populations\", \"populations/final\")\n for n in xrange(self.conf[\"n_runs\"]):\n pop = self.runs[n].record[\"final_pop\"]\n filename = dirname + \"/run{}.pop\".format(n)\n save(pop, filename)\n del self.runs[n].record[\"final_pop\"]\n outro(\"final populations\")\n if self.conf[\"output_mode\"] >= 0: # Save records\n dirname = intro(\"run records\", \"records\")\n for n in xrange(self.conf[\"n_runs\"]):\n rec = self.runs[n].record\n filename = dirname + \"/run{}.rec\".format(n)\n save(rec, filename)\n outro(\"run records\")",
"def bgs_write_simdata(sim, overwrite=False):\n from desispec.io.util import makepath\n from desispec.io.util import write_bintable\n\n simdatafile = os.path.join(sim.simdir, \n 'bgs_{}_simdata.fits'.format(sim.simid))\n makepath(simdatafile)\n\n cols = [\n ('SEED', 'S20'),\n ('NSPEC', 'i4'),\n ('EXPTIME', 'f4'),\n ('AIRMASS', 'f4'),\n ('SEEING', 'f4'),\n ('MOONFRAC', 'f4'),\n ('MOONSEP', 'f4'),\n ('MOONALT', 'f4')]\n\n simdata = Table(np.zeros(sim.nsim, dtype=cols))\n simdata['EXPTIME'].unit = 's'\n simdata['SEEING'].unit = 'arcsec'\n simdata['MOONSEP'].unit = 'deg'\n simdata['MOONALT'].unit = 'deg'\n\n simdata['SEED'] = sim.seed\n simdata['NSPEC'] = sim.nspec\n simdata['AIRMASS'] = sim.airmass\n simdata['SEEING'] = sim.seeing\n simdata['MOONALT'] = sim.moonalt\n simdata['MOONSEP'] = sim.moonsep\n simdata['MOONFRAC'] = sim.moonfrac\n simdata['EXPTIME'] = sim.exptime\n\n if overwrite or not os.path.isfile(simdatafile):\n print('Writing {}'.format(simdatafile))\n write_bintable(simdatafile, simdata, extname='SIMDATA', clobber=True)\n\n return simdata",
"def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n \n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')",
"def save(self):\n if os.path.isfile(self.filename): os.remove(self.filename)\n fits.HDUList([self.primary_hdu, self.energs_hdu, self.params_hdu, self.spectra_hdu]).writeto(self.filename)",
"def _write_h5_out(self, fout, save_hybrid_meta=True):\n\n with Outputs(fout, mode='a') as out:\n if 'meta' in out.datasets and save_hybrid_meta:\n hybrid_meta = to_records_array(self.hybrid_meta)\n out['meta'] = hybrid_meta\n\n for dset, data in self.profiles.items():\n out[dset] = data",
"def write_database(self,data):\n \n if not os.path.exists(self.database):\n output = FileTools.safe_hdf5_open(self.database,'w')\n else:\n output = FileTools.safe_hdf5_open(self.database,'a')\n\n obsid = self.getObsID(data)\n if obsid in output:\n grp = output[obsid]\n else:\n grp = output.create_group(obsid)\n\n grp.attrs['level3_filename'] = self.outfile\n\n if self.name in grp:\n del grp[self.name]\n lvl3 = grp.create_group(self.name)\n\n lvl3.attrs['version'] = __level3_version__\n lvl3.attrs['calibrator_obsid'] = self.nearest_calibrator\n lvl3.attrs['calibrator_source'] = self.cal_source\n output.close()",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def writeto(self, output):\n\n hdu = pyfits.PrimaryHDU(data=self.integrated_psf)\n (year, month, day, hour, minute, second, weekday, DOY, DST) = \\\n time.gmtime()\n hdu.header.update(\"DATE\", \"%4d-%02d-%02dT%02d:%02d:%02d\" %\n (year, month, day, hour, minute, second))\n hdu.header.update(\"FILENAME\", os.path.basename(output),\n comment=\"Name of this file\")\n hdu.header.update(\"INSTRUME\", self.instrument, \"Instrument name\")\n\n # Copy some specific keywords from the input header.\n ihdr = self.header\n if \"BUNIT\" in ihdr:\n hdu.header.update(\"BUNIT\", ihdr.get(\"BUNIT\"))\n if \"ERR_BUDG\" in ihdr:\n hdu.header.update(\"ERR_BUDG\", ihdr.get(\"ERR_BUDG\"),\n comment=\"Optical error budget version number\")\n if \"SI_FP\" in ihdr:\n hdu.header.update(\"SI_FP\", ihdr.get(\"SI_FP\"),\n comment=\"Focal plane for OPD calculation\")\n if \"OPD_WFE\" in ihdr:\n hdu.header.update(\"OPD_WFE\", ihdr.get(\"OPD_WFE\"),\n comment=\"OPD wavefront error (nm)\")\n if \"W\" in ihdr:\n hdu.header.update(\"W\", ihdr.get(\"W\"),\n comment=\"Flat width of hex segment (m)\")\n if \"GAP\" in ihdr:\n hdu.header.update(\"GAP\", ihdr.get(\"GAP\"),\n comment=\"Gap width between hex segments (m)\")\n if \"EDGE\" in ihdr:\n hdu.header.update(\"EDGE\", ihdr.get(\"EDGE\"),\n comment=\"Edge roll off (m)\")\n if \"SW\" in ihdr:\n hdu.header.update(\"SW\", ihdr.get(\"SW\"),\n comment=\"Obscuring strut width (m)\")\n if \"HTS\" in ihdr:\n hdu.header.update(\"HTS\", ihdr.get(\"HTS\"),\n comment=\"Height of segment isogrid\")\n if \"HT2\" in ihdr:\n hdu.header.update(\"HT2\", ihdr.get(\"HT2\"),\n comment=\"Height of secondary isogrid\")\n if \"HT3\" in ihdr:\n hdu.header.update(\"HT3\", ihdr.get(\"HT3\"),\n comment=\"Height of tertiary isogrid\")\n if \"FL\" in ihdr:\n hdu.header.update(\"FL\", ihdr.get(\"FL\"),\n comment=\"Focal length (m)\")\n\n # Add some keywords.\n if self.phase_file is not None:\n hdu.header.update(\"PHASE\", os.path.basename(self.phase_file),\n \"Name of phase image file\")\n if self.pupil_file is not None:\n hdu.header.update(\"PUPIL\", os.path.basename(self.pupil_file),\n \"Name of pupil image file\")\n hdu.header.update(\"OVERSAMP\", self.oversample, \"Oversampling factor\")\n hdu.header.update(\"CALCTYPE\", self.type,\n \"32 = single precision, 64 = double precision\")\n hdu.header.update(\"DIAMETER\", self.D, \"pupil diameter (meters)\")\n hdu.header.update(\"ORIG_NX\", self.header[\"naxis1\"],\n \"NAXIS1 in input image\")\n hdu.header.update(\"ORIG_NY\", self.header[\"naxis2\"],\n \"NAXIS2 in input image\")\n\n self.putCoordInfo(hdu)\n\n (wavelengths, weights) = self.filter\n if len(wavelengths) >= 99:\n root_wln = \"WAV\"\n root_wgt = \"WGT\"\n else:\n root_wln = \"WAVELN\"\n root_wgt = \"WEIGHT\"\n for i in range(len(wavelengths)):\n keyword = \"%s%d\" % (root_wln, i + 1)\n hdu.header.update(keyword, wavelengths[i],\n \"wavelength in microns\")\n keyword = \"%s%d\" % (root_wgt, i + 1)\n hdu.header.update(keyword, weights[i], \"weight\")\n\n ofd = pyfits.HDUList(hdu)\n try:\n ofd.writeto(output)\n except IOError as message:\n print(\"ERROR: Output file has NOT been written; \" \\\n \"use <psf>.writeto(output)\")\n print(message)\n return\n self.output_written = True",
"def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')",
"def test_save_overwriting(shipping_group, tmpdir):\n\n outfile = str(tmpdir.join(\"out.hdf5\"))\n\n with open(outfile, \"wb\") as out:\n pass\n\n shipping_group.save(outfile, 'hdf5')",
"def write(self,structure,tar,write_ft_soap_npy=True,write_ft_soap_png=True,write_ft_soap_full_npy=True,write_geo=True,op_id=0,format_geometry='aims'):\n \n if not is_descriptor_consistent(structure, self):\n raise Exception('Descriptor not consistent. Aborting.') \n \n desc_folder = self.configs['io']['desc_folder']\n descriptor_info = structure.info['descriptor']['descriptor_info']\n \n ft_soap_descriptor=structure.info['descriptor']['FT_SOAP_harmonics']\n \n \n \n if write_ft_soap_npy:\n \n ft_soap_filename_npy = os.path.abspath(os.path.normpath(os.path.join(desc_folder,\n structure.info['label'] +\n self.desc_metadata.ix[\n 'FT_SOAP_harmonics'][\n 'file_ending'])))\n only_file=structure.info['label'] + self.desc_metadata.ix['FT_SOAP_harmonics']['file_ending']\n \n np.save(ft_soap_filename_npy, ft_soap_descriptor)\n structure.info['FT_SOAP_harmonics_filename_npy'] = ft_soap_filename_npy\n tar.add(structure.info['FT_SOAP_harmonics_filename_npy'],arcname=only_file) \n \n if write_ft_soap_png:\n\n image_ft_soap_filename_png = os.path.abspath(os.path.normpath(os.path.join(desc_folder,\n structure.info['label'] +\n self.desc_metadata.ix[\n 'FT_SOAP_harmonics_image'][\n 'file_ending'])))\n only_file=structure.info['label'] + self.desc_metadata.ix['FT_SOAP_harmonics_image']['file_ending']\n \n plt.title(structure.info['label']+' FT SOAP descriptor ')\n plt.xlabel('FT SOAP component')\n plt.ylabel('FT SOAP value')\n plt.plot(ft_soap_descriptor)\n plt.savefig(image_ft_soap_filename_png)\n plt.close()\n structure.info['FT_SOAP_harmonics_filename_png'] = image_ft_soap_filename_png\n tar.add(structure.info['FT_SOAP_harmonics_filename_png'],arcname=only_file) \n \n if write_ft_soap_full_npy:\n \n full_fft=structure.info['descriptor']['FT_SOAP_full']\n \n ft_soap_full_filename_npy = os.path.abspath(os.path.normpath(os.path.join(desc_folder,\n structure.info['label'] +\n self.desc_metadata.ix[\n 'FT_SOAP_full_fft'][\n 'file_ending'])))\n only_file=structure.info['label'] + self.desc_metadata.ix['FT_SOAP_full_fft']['file_ending']\n \n np.save(ft_soap_full_filename_npy, full_fft)\n structure.info['FT_SOAP_full_filename_npy'] = ft_soap_full_filename_npy\n tar.add(structure.info['FT_SOAP_full_filename_npy'],arcname=only_file) \n \n if write_geo:\n \n coord_filename_in = os.path.abspath(os.path.normpath(os.path.join(desc_folder, structure.info['label'] +\n self.desc_metadata.ix['FT_SOAP_harmonics_coordinates'][\n 'file_ending'])))\n \n only_file=structure.info['label']+self.desc_metadata.ix['FT_SOAP_harmonics_coordinates']['file_ending']\n \n structure.write(coord_filename_in, format=format_geometry)\n structure.info['FT_SOAP_harmonics_coord_filename_in'] = coord_filename_in\n tar.add(structure.info['FT_SOAP_harmonics_coord_filename_in'],arcname=only_file)",
"def writeSerpent(self, output, mixsuffix):\n # Declare S(a,b) for H1_H2O, if it present. Here, Serpent requires the\n # ZAID :\n # http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual#mat_moder\n tslFiles = None\n moder = ''\n for iso in self.m_compo:\n if iso == 'H1_H2O':\n moder = 'moder lwtr' + str(self.m_mix) + mixsuffix + ' 1001\\n'\n # Write the header line for the entire material...\n output += 'mat mix' + str(self.m_mix) + mixsuffix + ' sum\\n'\n output += moder\n output += 'tmp ' + str(self.m_temp) + ' % Kelvin\\n'\n # ...and then, write one line for each isotope. But let's prepare it,\n # first !\n for iso in self.m_compo:\n if iso == 'H1_H2O':\n iso_ace = 'H1lwtr'\n iso_ace_tsl = 'lwtr'\n else:\n iso_ace = iso\n # Subset xsdata file for this isotope we're on, for all available\n # temperatures\n xsdata_subset = []\n # Go through xsdata file, find ace files of this isotope we're on\n with open(xsdata) as xsdatafile:\n xsdatalines = xsdatafile.readlines()\n for xsdataline in xsdatalines:\n # First field in xsdata file contains isotope's name\n isoxsdata = xsdataline.split()[0]\n if isoxsdata.startswith(iso_ace + '.'):\n xsdata_subset.append(xsdataline.rstrip().split())\n if len(xsdata_subset) == 0:\n raise Exception('Could not find ace file for isotope: ' + iso)\n # Sort by ascending temperatures, contained in field [6]\n xsdata_subset.sort(key=lambda x: int(x[6]))\n # Check that a temperature below the one requested is available\n if int(xsdata_subset[0][6]) > self.m_temp:\n raise Exception('The minimum temperature available in the ace '\n + 'files of the isotope ' + iso_ace + ' is '\n + xsdata_subset[0][6] + 'K' + '. This is too high for the '\n + 'requested temperature: ' + str(self.m_temp) + 'K')\n # For TSL interpolation, we also need an available temperature\n # above the one requested\n if (int(xsdata_subset[-1][6]) < self.m_temp) and (iso == 'H1_H2O'):\n raise Exception('The maximal temperature available in the TSL '\n + 'ace files of the isotope ' + iso_ace + ' is '\n + xsdata_subset[-1][6] + 'K' + '. This is too low for the '\n + 'requested temperature: ' + str(self.m_temp) + 'K')\n # Select ace file with the temperature immediately below\n aceFile = None\n i = 0\n while not aceFile:\n if int(xsdata_subset[i][6]) > self.m_temp:\n aceFile = xsdata_subset[i-1][0]\n # Corresponding TSL files should also be kept (lower *and*\n # upper bounds are required)\n if iso == 'H1_H2O':\n if tslFiles:\n raise Exception('TSL file has been already '\n + 'attributed. D2S is limited to one single TSL '\n + 'per material.')\n # * Remove isotope name used in continuous ace and\n # use the TSL specific name instead\n # * Remove last character ('c' for 'continuous ace')\n # and replace it with 't' (for 'tsl ace')\n tslFiles = (iso_ace_tsl + xsdata_subset[i-1][1][-4:-1]\n + 't' + ' '\n + iso_ace_tsl + xsdata_subset[i][1][-4:-1]\n + 't')\n i = i + 1\n # Write the line for the isotope we're on\n output += \"%s %.8E\\n\" %(aceFile,self.m_compo[iso])\n # Write the 'thermr' card, if needed\n if tslFiles:\n output += ('therm lwtr' + str(self.m_mix) + mixsuffix + ' '\n + str(self.m_temp) + ' ' + tslFiles + '\\n')\n output += '\\n'\n return output",
"def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")"
] |
[
"0.6592326",
"0.6552484",
"0.64661205",
"0.5749943",
"0.5684815",
"0.55851275",
"0.5578105",
"0.551729",
"0.5509045",
"0.54342794",
"0.54312193",
"0.5402147",
"0.5394604",
"0.5379659",
"0.5317676",
"0.53172624",
"0.52944994",
"0.52812386",
"0.5244382",
"0.52434015",
"0.52364534",
"0.518701",
"0.5183129",
"0.5177479",
"0.51667964",
"0.5161017",
"0.51538324",
"0.5148776",
"0.5147881",
"0.5146673"
] |
0.7460119
|
0
|
Lists days on which error requests make up more than 1% of all requests
|
def error_dates():
results = query_database(QUERIES[2])
print('\nOn which days did more than 1% of requests lead to errors?\n')
for date, rate in results:
print(' * {} -- {:.2%}'.format(date, rate))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def days_with_request():\n\n # To print information\n information_string = '3. Days with more than ' \\\n '1% of request that lead to an error:\\n'\n\n # Query string\n query = \"\"\"select * from (select date(time),\n round(100.0*sum(case log.status\n when '200 OK' then 0 else 1 end)/count(log.status),3)\n as error from log group by date(time)\n order by error desc) as subq where error > 1;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t{0:%B %d, %Y} - {1}%'.format(result[0], result[1]))\n\n print(\"\\n\")",
"def find_error_days():\n query = \"\"\"\n SELECT all_requests.day,\n (ROUND((error_requests.bad * 1000)/all_requests.good)/10)\n AS percent\n FROM all_requests\n JOIN error_requests\n ON all_requests.day = error_requests.day\n WHERE ROUND((error_requests.bad * 1000)/all_requests.good) > 10\n \"\"\"\n results = psql_connection(query)\n\n print(\"Days with more than 1% errors\")\n for result in results:\n print '{date} - {errors} % errors'.format(\n date=result[0].strftime('%B %d, %Y'), errors=result[1])",
"def get_error_days():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # dividing views of bad requests and total request to get percentage\n c.execute(\"select bad_request.time, \"\n \"(bad_request.num * 1.0 / total_request.num) as errors \"\n \"from bad_request, total_request \"\n \"where bad_request.time = total_request.time \"\n \"and (bad_request.num * 1.0 / total_request.num) > 0.01\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"Day(s) where more than 1 percent of requests were errors:\"\n \"\\n\\n\")\n for time, errors in results:\n text_file.write(time.strftime('%B %d, %Y') + \" - \" +\n str(errors * 100)[:3] + \"% errors\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close()",
"def get_high_error_days():\n query3 = \"\"\"select to_char(date, 'Mon dd, yyyy'),\n error_rate from\n (select a.date, errors, requests,\n round(100.0 * errors / requests, 1) as error_rate\n from\n (select date(time) as date,\n count(*) as errors\n from log\n where status not like '%200%'\n group by date) as a,\n (select date(time) as date,\n count(*) as requests\n from log\n group by date) as b\n where a.date = b.date) as error_rates\n where error_rate >= 1.00;\"\"\"\n results = execute_query(query3)\n for result in results:\n print(\"- %s — %s%% errors\" % (result[0], result[1]))",
"def error_days():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n query = \"\"\"select log_dates.log_date as day, \n cast(errors.error_total as float) / cast(log_dates.total as float) \n as percent from log_dates, errors where\n log_dates.log_date = errors.error_date group by day, error_total, total\n having cast(errors.error_total as float) / cast(log_dates.total\n as float) >= .01 order by day asc;\"\"\"\n c.execute(query)\n errors = c.fetchall()\n print \" \"\n print \"Dates with more than 1% error rate:\"\n print \"-----------------------------------\"\n for error in errors:\n print error[0].strftime(\"%B %d , %Y\"), \"--\",\\\n \"{: .2%}\".format(error[1]), \"errors\"\n\n db.close()",
"def errDays():\n c = db.cursor()\n c.execute(\"select date, percent from avg_error\\\n where percent > 1.00;\")\n results = c.fetchall()\n c.close()\n return results",
"def error_report():\n db, c = connect(DBNAME)\n c.execute(\"select to_char(time,'FMMonth DD, YYYY') as date, \"\n \"round((sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100,2) \"\n \"as percent_error from log group by date \"\n \"having (sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100 > 1\")\n error_table = c.fetchall()\n db.close()\n print \"\\nDates on Which Over 1% of Requests Led to Errors:\"\n for error in error_table:\n if __name__ == '__main__':\n print str(error[0]) + \" - \" + str(error[1]) + \"%\"",
"def daily_error_gt_1pct(db):\n\n query = \"\"\"\n select day, round(error_pct,2) as error_pct\n from ( select day,\n ( ( sum(occurance) filter(where status != '200 OK')\n / sum(occurance) ) * 100 ) as error_pct\n from ( select to_char(time, 'Month DD, YYYY') as day,\n status,\n count(*) as occurance\n from log\n group by day, status\n order by day, occurance desc ) as subq1\n group by day ) as subq\n where subq.error_pct > 1;\n \"\"\"\n\n print('\\nOn which days did more than 1% of requests lead to errors?\\n')\n for row in do_query(db, query):\n print('\\t{} -- {}% errors'.format(row[0], row[1]))",
"def days_with_error() :\n query = \"\"\"SELECT errorlogs.date,round(100.0*errorcount/logcount,2) As Percent FROM logs,errorlogs\n WHERE logs.date=errorlogs.date AND errorcount>logcount/100\"\"\"\n result = get_data(query)\n print(\" 3. Days with more than 1% of error:\")\n print(\"\")\n for record in result :\n print(' ' + str(record[0]) + ' '+ '-' + \" \" + str(record[1]) + '%'+ ' '+ 'errors')\n print(\"\\t\")",
"def print_top_error_days():\n\n output = get_query_results(\n '''SELECT date, ROUND(fail*100.0/total, 2) AS percentage\n FROM errors WHERE (fail*100.0/total) > 1\n ORDER BY percentage DESC;'''\n )\n print(\"\\nDays With HTTP Error Rates Over 1%: \\n\")\n for date, rate in output:\n print(\"\\\"{0:%B %d, %Y}\\\" -- {1:}%\".format(date, rate))",
"def get_errorMoreThan1Percent():\n\n query = \"\"\"SELECT * FROM error_Records WHERE percent > 1.00\"\"\"\n\n posts = execute_query(query)\n print('\\nOn which days did more than 1% of requests lead to errors?')\n for i in posts:\n print(str(i[0])+'-'+str(i[1])+'% errors')",
"def query_errors():\r\n conn, cur = connect()\r\n query3 = (\"select * from errors where error >1\")\r\n cur.execute(query3)\r\n res3 = cur.fetchall()\r\n conn.close()\r\n print(\"\\nDays with more than 1% of requests lead to errors:\\n\")\r\n for i in range(0, len(res3), 1):\r\n print(str(res3[i][0]) + \" --> \" + str(round(res3[i][1], 2))+\" %errors\")",
"def one_percent_error_loads():\n print '3. The days where there are more than 1% load error are'\n return (\"\"\"SELECT gday, perc FROM (select date(time) as gday,\"\"\"\n \"\"\" ((count(CASE WHEN status = '404 NOT FOUND' THEN 1\"\"\"\n \"\"\" END)::decimal / count(status)::decimal) * 100.0) as perc\"\"\"\n \"\"\" FROM log GROUP BY gday) as errreq where perc >=1;\"\"\")",
"def problem_days(cursor):\n days = 'None found'\n try:\n logs = \"\"\"select daily.day,\n daily_total::integer/100,\n daily_errors::integer,\n daily_total\n from daily_logs as daily, error_logs as errors\n where (daily_total::integer/100.0) <\n daily_errors::integer\n and daily.day = errors.day\n order by daily.day\n \"\"\"\n\n cursor.execute(logs)\n report = cursor.fetchall()\n # If no days were found, return\n if len(report) <= 0:\n return days\n\n except psycopg2.Error as e:\n print('Fetching summary of days with >1% error statuses: \\r\\n{}'\n .format(e.pgerror))\n\n # If the query returns any days, return the results.\n else:\n day_str = ' {0} - {1}% of {2} were errors\\r\\n'\n days = 'Days when over 1% of requests lead to errors: \\r\\n'\n for date in report:\n percentage_error = round((date[2]/date[1]), 2)\n days += day_str.format(date[0].strftime('%d of %B %Y'),\n percentage_error,\n date[3])\n return days",
"def printDaysWithErrors():\n cursor = connection.cursor()\n query = \"\"\"\n SELECT * FROM\n (SELECT daily_error_view.day,\n (daily_error_view.errors * 100.0)\n /\n (daily_traffic_view.views * 100.0)\n AS error_rate\n FROM daily_error_view JOIN daily_traffic_view\n ON daily_error_view.day = daily_traffic_view.day)\n AS daily_error_rate\n WHERE daily_error_rate.error_rate > 0.01;\n \"\"\"\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nDays with greater than 1 percent error rate:\")\n for result in results:\n print(\"{:%B %d, %Y} - {:.2%} errors\".format(result[0], result[1]))",
"def get_days_rate():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_days_rate = \"\"\"\n SELECT * FROM (SELECT TO_CHAR(time::date,'Mon DD, YYYY') AS date,\n ROUND((COUNT(status) FILTER (\n WHERE status='404 NOT FOUND'))*100/COUNT(status)::decimal, 2)::text\n ||'% errors' AS rate\n FROM log\n GROUP BY time::date) AS error_rate\n WHERE rate::text > 1::text;\"\"\"\n c.execute(query_days_rate)\n rates = from_db_cursor(c)\n db.close()\n return rates",
"def error_rate():\n query = \"\"\"select to_char(date, 'FMMonth DD, YYYY') as date,\n round(error_req::numeric/total_req*100, 2) as error_rate\n from daily_errorreq_totalreq\n where round(error_req::numeric/total_req*100, 2) > 1.00\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report function\n report = table_to_report(result_table, '%')\n return \"Days Where Over 1% of Requests Leading to Errors:\\n\" + report",
"def getWorstDays():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select c.* from\"\n + \"(select a.* , b.* , \"\n + \"(cast( b.total as decimal(16,4))/a.total)*100 as percent from\"\n + \" (select count(*) total , time::timestamp::date as timea \"\n + \"from log group by timea order by timea) as a, \"\n + \"(select count(*) total , time::timestamp::date as timea \"\n + \"from log where status <> '200 OK'\"\n + \"group by timea order by timea ) as b \"\n + \"where a.timea = b.timea) as c where c.percent > 1;\")\n days = c.fetchall()\n db.close()\n return days",
"def query3():\n\n print(\"3. On which days did more than 1% of requests lead to errors?\\n\")\n\n query = \"\"\"\n SELECT view_daily_requests.date,\n CAST(view_daily_errors.daily_errors AS REAL) /\n CAST(view_daily_requests.daily_requests AS REAL) AS pc\n FROM view_daily_requests\n JOIN view_daily_errors\n ON view_daily_requests.date = view_daily_errors.date\n WHERE CAST(view_daily_errors.daily_errors AS REAL) /\n CAST(view_daily_requests.daily_requests AS REAL) >= 0.01\n ORDER BY pc DESC;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"pc\" as percentage,\n # format date '31 December 2018'. Print output.\n j = list(j)\n j[0] = j[0].strftime(\"%d %B %Y\")\n j[1] = str(format(j[1], '%'))\n print(\" Date: {} - {} errors\".format(*j))",
"def n_subimissions_per_day( url, headers ):",
"def high_errors():\n\n cur.execute(\"SELECT newdate, percentage FROM stats WHERE percentage > 1;\")\n result = cur.fetchall()\n return result",
"def print_error_data(error_data):\n\n print('\\nDays when there were more than 1% errors in HTTP :\\n')\n for day in error_data:\n print(str(day[0]) + '\\t-\\t' + str(day[1]) + '% \\n')\n print('-------------------------------------------------------\\n')",
"def get_errorData_query():\n\n query = '''select total_requests.days, errors*100/total_requests as percentage\n from error_requests, total_requests\n where error_requests.days = total_requests.days\n and (errors*100/total_requests > 1);'''\n\n return query",
"def get_no_of_days(self, slug_ls):\n date_ls = []\n #for each country get first case confirmed date\n for i in slug_ls:\n url = self.base_url+\"dayone/country/\"+i+\"/status/confirmed\"\n response = requests.get(url)\n date_ls.append(response.json()[0]['Date'])\n \n t1 = date.today()\n days = []\n #Calculate 'days since first case' for each country\n for i in range(len(date_ls)):\n t2 = datetime.datetime.strptime(date_ls[i],\"%Y-%m-%dT%H:%M:%SZ\")\n days.append(str(t1-t2.date())[0:4])\n return days",
"def _get_resends(self):\n if not self.has_error():\n return []\n\n errors = []\n i = 0\n for item in self.my_json['results']:\n if item.has_key('error') and item['error'] == 'Unavailable':\n errors.append((i, item['error']))\n i += 1\n return errors",
"def find_error_dates(conn: sqlite3.Connection):\n\n curr = conn.cursor()\n\n # Daily\n querystr = '''\n CREATE TABLE error_days_d AS\n SELECT DISTINCT station, read_date\n FROM daily_raw dr \n WHERE flag1 IN ('I','P')\n '''\n curr.execute(querystr)\n\n querystr = '''\n CREATE INDEX edd_idx\n ON error_days_d\n (station, read_date)\n '''\n curr.execute(querystr)\n\n # Hourly\n\n # Based on simple flags\n\n querystr = '''\n CREATE TABLE error_days_h AS\n SELECT DISTINCT station, read_date\n FROM hourly_raw hr \n WHERE flag1 IN ('[',']','{','}')\n OR flag2 IN ('Q','q')\n '''\n curr.execute(querystr)\n\n querystr = '''\n CREATE INDEX edh_idx\n ON error_days_h\n (station, read_date)\n '''\n curr.execute(querystr)\n\n # Based on accumulation flags\n\n querystr = '''\n CREATE TABLE error_days_ha AS\n SELECT station, read_date, COUNT(1) AS count1\n FROM hourly_raw hr \n WHERE flag1 in ('a', 'A')\n GROUP BY station, read_date \n '''\n curr.execute(querystr)\n\n querystr = '''\n CREATE INDEX edha_idx\n ON error_days_ha\n (station, read_date)\n '''\n curr.execute(querystr)\n\n conn.commit()",
"def print_days(days):\n for (day, percent) in days:\n print \"%s - %.2f%% errors\" % (day, percent)",
"def get_last_seven_days():\n logs = json.load(open(\"seven_log\", \"r\"))\n days = [day for day in logs]\n usage = [[logs[day][gpu] for gpu in logs[day]] for day in logs]\n return days, usage",
"def Daysleftverification():\n pass",
"def sitetotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalrequests\n\t\texcept Exception as e:\n\t\t\traise e"
] |
[
"0.7756553",
"0.7632925",
"0.7542556",
"0.72305334",
"0.70753413",
"0.69370914",
"0.69317937",
"0.6914181",
"0.6869531",
"0.6851303",
"0.68247795",
"0.68124545",
"0.6742325",
"0.6640619",
"0.6598854",
"0.62288076",
"0.62149715",
"0.6205723",
"0.61201817",
"0.61027896",
"0.60082453",
"0.5915407",
"0.5843258",
"0.5783882",
"0.57735175",
"0.56476194",
"0.5620687",
"0.55579317",
"0.5521012",
"0.5514893"
] |
0.77700573
|
0
|
Generates a random Hermitian matrix as a numpy array.
|
def random_numpy_hermitian(nqubits, dtype=np.complex128):
shape = 2 * (2 ** nqubits,)
m = random_numpy_complex(shape, dtype)
return (m + m.T.conj()) / 2.0
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def random_matrix(rows, cols):\n return np.random.randn(rows, cols)",
"def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5",
"def generate_matrix(rows, cols):\n matrix_random = np.random.rand(rows, cols)\n return matrix_random",
"def generate_random_matrix(n):\n return [[random.randint(1, 50) for i in range(n)] for j in range(n)]",
"def gen_rand_mat(dim=3):\n tmp = npr.uniform(-1, 1, (dim,dim))\n\n # make matrix symmetric\n for i in range(dim):\n for j in range(i+1, dim):\n tmp[i,j] = tmp[j,i]\n\n return tmp",
"def _get_rand_array(self):\n return np.random.random((self.w, self.h))",
"def random_density_matrix(nqubits: int, dtype=np.complex128) -> np.ndarray:\n rho = random_numpy_hermitian(nqubits, dtype=dtype)\n # Normalize\n ids = np.arange(2 ** nqubits)\n rho[ids, ids] = rho[ids, ids] / np.trace(rho)\n return rho.astype(dtype)",
"def _get_rand_array(self):\n return np.random.random((self.w + 1, self.h + 1, 2))",
"def generate_random_matrix(dim):\n\n A = np.complex128(np.random.random([dim, dim]))\n A_adjoint = A.conj().T\n\n P = A @ A_adjoint\n P += np.identity(len(P))\n\n P_inverse = np.linalg.inv(P)\n\n return P_inverse",
"def _make_random_matrix(self, n_components, n_features):",
"def matrix_generate(n):\n a = np.eye(n)\n max = 0\n for i in range(n):\n for j in range(n):\n a[i][j] = random.randint(0,50)\n a[j][i] = a[i][j]\n if a[i][j] > max:\n max = a[i][j]\n for i in range(n):\n a[i][i] = max * n + random.randint(20,40)\n return np.array(a)",
"def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )",
"def irandmatrix(n, range = 10):\n A = mp.matrix(n, n)\n for i in xrange(n):\n for j in xrange(n):\n A[i,j]=int( (2 * mp.rand() - 1) * range)\n return A",
"def randImS():\n u = 2*np.random.random()-1\n theta = 2*math.pi*np.random.random()\n h = np.zeros((1,4))\n h[0,1] = np.cos(theta)*np.sqrt(1-u**2)\n h[0,2] = np.sin(theta)*np.sqrt(1-u**2)\n h[0,3] = u\n return h",
"def H(self) -> BaseMatrix:",
"def H(self) -> BaseMatrix:",
"def sigMatrixGen(input_matrix, n):\n\n result = []\n\n for i in range(n):\n sig = sigGen(input_matrix)\n result.append(sig)\n\n # return a ndarray\n print(\"\\nsig matrix:\")\n print(np.array(result))\n print()\n return np.array(result)",
"def data_gen(size, p):\n #print(np.random.get_state()[1][0])\n random_table = np.random.binomial(size = size, p = p, n = 1)\n test_array = np.zeros((size, 2), dtype = int)\n for i in range(size):\n test_array[i,0] = i\n test_array[i,1] = random_table[i]\n return test_array",
"def init_matrix(x_dim = 10, y_dim = 10):\n ret = np.zeros((x_dim, y_dim))\n x_rand = np.random.randint(0, x_dim - 1)\n y_rand = np.random.randint(0, y_dim - 1)\n ret[x_rand, y_rand] = 1\n\n return(ret)",
"def gen_m(self, n_dims):\n m = np.random.randint(-1000, 1000, n_dims).astype(float)\n\n return m",
"def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:\n\n a = np.random.uniform(0, 1, (size, size))\n if lower:\n ind = np.triu_indices(5, 1)\n else:\n ind = np.tril_indices(5, 1)\n a[ind] = 0\n\n return a",
"def guassian_initalisation(num_inputs, num_output, relus=False):\n\n return np.random.normal(0, 0.01, size=(num_inputs, num_outputs))",
"def _get_rand_array(self):\n rand_array = np.random.random((self.n, self.n, 2))\n rand_array[0, :, 1] = 0.\n rand_array[-1, :, 1] = 0.\n return rand_array",
"def Generate_Hermite(n, beta):\n main_diag = np.sqrt(2) * np.random.normal(size=n)\n off_diag = [np.sqrt(np.random.chisquare(beta * (n-i))) for i in range(1,n)]\n H = diags([off_diag, main_diag, off_diag], [-1,0,1]).toarray()/np.sqrt(2)\n return H",
"def normalized_random_array() -> np.array:\n z0 = np.random.randn(2) + 1j * np.random.randn(2)\n return z0 / np.linalg.norm(z0)",
"def _get_rand_array(self):\n pass",
"def make_table(m, n):\n return np.array([[0] * n for _ in range(m)], dtype=float)",
"def make_herm(n,transpose=False):\n #S we need special cases to handle the coefficients less than two, as the\n #S recursion formula works only for n>2. These cases aren't hard though!\n\n #S make the first element equal to 1\n h = np.zeros([n+1,n+1],dtype=np.float64)\n h[0,0] = 1.\n \n #S if the array is large enough, make element_2,2 equal to 2\n if n > 0:\n h[1,1] = 2.\n #S formula seems to work, found a different one on wikipedia. this one from\n #S make_herm.pro, maybe just the same result? need to work them out to \n #S equivalence. this returns correct array up to H_10\n if n > 1:\n for ind in range(2,n+1):\n h[ind,:] = np.roll(h[ind-1,:],1)*2.-2.*float(ind-1)*h[ind-2,:]\n #S if we want the transpose\n if transpose:\n return h.T\n\n #S otherwise just send out the h array\n else:\n return h",
"def randq():\n u = np.random.random((1,3))\n #print(\"u =\",u)\n h = np.zeros((1,4))\n h[0,0] = np.sin(2*math.pi*u[0,1])*np.sqrt(1-u[0,0])\n h[0,1] = np.cos(2*math.pi*u[0,1])*np.sqrt(1-u[0,0])\n h[0,2] = np.sin(2*math.pi*u[0,2])*np.sqrt(u[0,0])\n h[0,3] = np.cos(2*math.pi*u[0,2])*np.sqrt(u[0,0])\n return h",
"def _make_gaussian_matrix(\n data_count: int,\n feature_count: int,\n) -> np.ndarray:\n return np.random.randn(data_count, feature_count)"
] |
[
"0.66864717",
"0.6652837",
"0.65999264",
"0.65480554",
"0.644787",
"0.63744944",
"0.63523203",
"0.63135517",
"0.62188065",
"0.621837",
"0.62032086",
"0.6190402",
"0.61276305",
"0.61273324",
"0.5980093",
"0.5980093",
"0.591237",
"0.59014493",
"0.5889934",
"0.58402646",
"0.5815427",
"0.5802963",
"0.57760996",
"0.57615584",
"0.57469976",
"0.5743853",
"0.5737982",
"0.5711596",
"0.5710778",
"0.5695609"
] |
0.6777068
|
0
|
Generates a random density matrix. Note that the density matrix generated by this method is not necessarily positive. This is okay for most tests but may not work for some cases such as the entanglement entropy calculation.
|
def random_density_matrix(nqubits: int, dtype=np.complex128) -> np.ndarray:
rho = random_numpy_hermitian(nqubits, dtype=dtype)
# Normalize
ids = np.arange(2 ** nqubits)
rho[ids, ids] = rho[ids, ids] / np.trace(rho)
return rho.astype(dtype)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gen_density_matrix(states=None, dimensions=None):\n if states is None:\n tdim = np.prod(dimensions)\n dmtotal0 = np.eye(tdim) / tdim\n\n return dmtotal0\n\n dmtotal0 = np.eye(1, dtype=np.complex128)\n\n for i, s in enumerate(states):\n\n if not hasattr(s, \"__len__\"):\n # assume s is int or float showing the spin projection in the pure state\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n state_number = int(round((d - 1) / 2 - s))\n dm_nucleus[state_number, state_number] = 1\n\n else:\n if s.shape.__len__() == 1:\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n np.fill_diagonal(dm_nucleus, s)\n\n else:\n dm_nucleus = s\n\n dmtotal0 = np.kron(dmtotal0, dm_nucleus)\n\n return dmtotal0",
"def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5",
"def gen_positive_definite_matrix(dim: int) -> np.ndarray:\n pdmatrix = np.random.random(size=(dim, dim))\n pdmatrix = np.dot(pdmatrix, pdmatrix.T)\n pdmatrix /= pdmatrix.max()\n return pdmatrix",
"def random_matrix(rows, cols):\n return np.random.randn(rows, cols)",
"def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )",
"def density_matrix(wires) -> \"DensityMatrixMP\":\n wires = Wires(wires)\n return DensityMatrixMP(wires=wires)",
"def generate_matrix(rows, cols):\n matrix_random = np.random.rand(rows, cols)\n return matrix_random",
"def random(N, D, rng):\n samples = rng.randn(N, D)\n norm = np.sqrt(np.sum(samples*samples, axis=1))\n return samples/norm[:,None]",
"def _random_not_singular(N):\n data = np.zeros((1, 1))\n while np.linalg.det(data) == 0:\n data = np.random.random((N, N)) + \\\n 1j * np.random.random((N, N)) - (0.5 + 0.5j)\n return data",
"def initializer(shape, dist=\"zero\"):\n if dist == \"zero\":\n M = torch.zeros(shape, dtype=torch.double)\n elif dist == \"one\":\n M = torch.ones(shape, dtype=torch.double)\n elif dist == \"id\":\n M = torch.eye(shape, dtype=torch.double)\n elif dist == \"uniform\":\n M = torch.rand(shape, dtype=torch.double)\n elif dist == \"log_normal\":\n m = torch.distributions.log_normal.LogNormal(\n 0, 1)\n M = m.sample(shape).double()\n elif dist == \"gamma\":\n m = torch.distributions.gamma.Gamma(1, 1)\n M = m.sample(shape).double()\n else:\n print(\"The distribution you gave is unknown... Matrix initialized with zeros\")\n return M",
"def generarMatriz(n):\n\n dist = np.random.randint(MIN_DISTANCIA,MAX_DISTANCIA/2,size=(n,n))#Hasta 50 ya que se va a sumar con la transpuesta duplicando los valores\n dist = dist + dist.T#Para hacer la matriz simetrica\n np.fill_diagonal(dist, 0)#La distancia entre una ciudad y si misma es 0\n return dist",
"def random_normal():\r\n return inverse_normal_cdf(random.random())",
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def gen_mats(seed, shape, n=4, fmt=\"csr\", density=0.1):\n\n np.random.seed(seed)\n dens = density * np.random.random()\n mats = [sp.random(*shape, density=dens, format=fmt) for i in range(n)]\n return mats",
"def _make_gaussian_matrix(\n data_count: int,\n feature_count: int,\n) -> np.ndarray:\n return np.random.randn(data_count, feature_count)",
"def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9",
"def diag_dom(n, num_entries=None):\n if num_entries is None:\n num_entries = int(n**1.5) - n\n A = np.zeros((n,n))\n rows = np.random.choice(np.arange(0,n), size=num_entries)\n cols = np.random.choice(np.arange(0,n), size=num_entries)\n data = np.random.randint(-4, 4, size=num_entries)\n for i in xrange(num_entries):\n A[rows[i], cols[i]] = data[i]\n for i in xrange(n):\n A[i,i] = np.sum(np.abs(A[i])) + 1\n return A",
"def gen_random_matrix(a_size, b_size, AB_d, BA_d, AA_d, BB_d):\n\n def random_gen(n_samples):\n return np.ones(shape=(n_samples,))\n\n AB = sparse.random(a_size, b_size, AB_d, data_rvs=random_gen, format=\"csr\")\n BA = sparse.random(b_size, a_size, BA_d, data_rvs=random_gen, format=\"csr\")\n AA = sparse.random(a_size, a_size, AA_d, data_rvs=random_gen, format=\"csr\")\n BB = sparse.random(b_size, b_size, BB_d, data_rvs=random_gen, format=\"csr\")\n\n return AB, BA, AA, BB",
"def gen_random_matrix_(region_sizes, result, densities=None):\n if densities is None:\n densities = [0.01, 0.02, 0.0001, 0.0005]\n ab, ba, aa, bb = gen_random_matrix(*region_sizes, *densities)\n mc = MatrixConnectivity(ab=ab, ba=ba, aa=aa, bb=bb)\n mc.create_connections()\n reverse_graph = reverse(mc.graph)\n args_dict = mc.compute_stats()\n result[\"full_matrix_stats\"] = print_args_dict(args_dict, out=False)\n to_write = [mc.num_a, mc.num_b]\n\n return mc, reverse_graph, to_write, args_dict",
"def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)",
"def randomize(self, seed_density):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (rand.random() <= seed_density):\r\n self.cells[x][y] = 1",
"def get_D100():\n m = 100\n random.seed(1111*m)\n A = random.randn(m, m) + 1j*random.randn(m, m)\n A = 0.5*(A + np.conj(A).T)\n A[np.tril_indices(m, -2)] = 0\n A[np.triu_indices(m, 2)] = 0\n return A",
"def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b / np.sum(b, 1)[:, None]",
"def get_density(matrix):\n return matrix.getnnz() / (matrix.shape[0] * matrix.shape[1])",
"def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]",
"def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]",
"def test_random_mpd(self):\r\n seed(0)\r\n\r\n distmat = array([[0., 0.85, 0.5, 0.14, 0.36],\r\n [0.85, 0., 0.79, 0.25, 0.47],\r\n [0.5, 0.79, 0., 0.24, 0.46],\r\n [0.14, 0.25, 0.24, 0., 0.8],\r\n [0.36, 0.47, 0.46, 0.8, 0.]])\r\n\r\n # test calculated by hand to ensure correct. first 3 random index draws\r\n # are: [2,0,1], [2,1,0], [1,4,3], avgs = 2.14/3, 2.14/3, 1.52/3\r\n # mean= .62, std means =0.13199326582148888\r\n obs_mean, obs_std = random_mpd(distmat, n=3, iters=3)\r\n assert_almost_equal(obs_mean, 0.64444444444444449)\r\n assert_almost_equal(obs_std, 0.097423600963479878)",
"def createDist(N):\n return np.random.normal(loc=1000.,scale=5.,size=np.random.poisson(lam=N))",
"def test_probability_density(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 0.])\n expected_result = 0.032245296420409846\n\n # Run\n result = copula.probability_density(X)\n\n # Check\n assert expected_result - 1e-16 < result < expected_result + 1e-16",
"def gen_rand_mat(dim=3):\n tmp = npr.uniform(-1, 1, (dim,dim))\n\n # make matrix symmetric\n for i in range(dim):\n for j in range(i+1, dim):\n tmp[i,j] = tmp[j,i]\n\n return tmp"
] |
[
"0.63794065",
"0.6172743",
"0.6084907",
"0.6083014",
"0.60053265",
"0.5925018",
"0.59105027",
"0.58204406",
"0.5809415",
"0.5804072",
"0.57617676",
"0.5752207",
"0.57248026",
"0.57116",
"0.562421",
"0.5615175",
"0.5615049",
"0.56099176",
"0.56061727",
"0.55972105",
"0.55879873",
"0.55756325",
"0.5571673",
"0.5570786",
"0.5559333",
"0.5559333",
"0.5546785",
"0.5516564",
"0.55132663",
"0.55031425"
] |
0.692391
|
0
|
Test IO of .surf
|
def test_geometry():
surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated"))
coords, faces = read_geometry(surf_path)
assert_equal(0, faces.min())
assert_equal(coords.shape[0], faces.max() + 1)
# Test quad with sphere
surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "sphere"))
coords, faces = read_geometry(surf_path)
assert_equal(0, faces.min())
assert_equal(coords.shape[0], faces.max() + 1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_surf():\n def f(x, y):\n sin, cos = numpy.sin, numpy.cos\n return sin(x + y) + sin(2 * x - y) + cos(3 * x + 4 * y)\n\n x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]\n s = surf(x, y, f)\n mlab.show()\n #cs = contour_surf(x, y, f, contour_z=0)\n return",
"def test_morph_data():\n curv_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"curv\"))\n curv = read_morph_data(curv_path)\n assert_true(-1.0 < curv.min() < 0)\n assert_true(0 < curv.max() < 1.0)",
"def __init__(self, renderSurf):\n self.surf = renderSurf",
"def extract_surf(filename, upright = False, octaves = 4, intervals = 4, init_sample = 2, thres = 0.0004):\n\tnkeypoints = C.c_int(0)\n\t\n\tresult = _lib.extract_surf(filename,upright,octaves,intervals,init_sample,thres,C.pointer(nkeypoints))\n\tnkeypoints = nkeypoints.value\n\tkeypoints = np.array(result[:4*nkeypoints])\n\tdkpts = 4*nkeypoints\n\tdescr = 64*nkeypoints\n\tkeypoints = np.array(result[:dkpts])\n\tdescriptors = np.array(result[dkpts:dkpts+descr])\n\t_lib.free(result)\n\treturn keypoints.reshape(nkeypoints,4),descriptors.reshape(nkeypoints,64)",
"def surf_plot(x, y, z, filename, title = None, xlabel = None, ylabel = None, zlabel = None, elev = 0, azim = 0, **surf_kwargs):\n # Checking that the x- and y- and z- inputs are equal in length \n if len(x) != len(y) != len(z):\n raise LengthError()\n\n fig = plt.figure() # Creates blank figure\n ax = fig.gca(projection='3d') # Creating 3-dimensional axes\n fig.set_size_inches(18, 10) # Sets figure size\n\n # Plotting the surface - specifying the colormap, and setting the surface to opaque (with antialiased = False)\n ax.plot_trisurf(x, y, z, cmap = cm.coolwarm, linewidth=0, antialiased=False, **surf_kwargs) \n\n # Setting plot parameters\n ax.set_title(title, fontsize = 24, pad = 15)\n ax.set_xlabel(xlabel, fontsize=18, labelpad = 15)\n ax.set_ylabel(ylabel, fontsize=18, labelpad = 15)\n ax.set_zlabel(zlabel, fontsize=18, labelpad = 15)\n ax.tick_params(axis='both', which='major', pad=10)\n ax.set_zlim(0, 1.0) # z-axis limits set to [0,1] as the z-axis refers to probability in our case.\n\n ax.view_init(elev=elev, azim=azim) # Sets 'camera angle' of surface plot, for saving\n # f-string allows save filepath to be set inside the plt.savefig() function\n plt.savefig(f'{os.path.join(plot_path,filename)}.pdf', dpi = 200) # Saving the plot in the 'plots' folder",
"def assertIsSurfGifti(*args):\n for fname in args:\n assert fname.endswith('.surf.gii'), \\\n 'file must be a surface gifti (surf.gii): {}'.format(fname)",
"def py_SurfStatAvSurf(filenames, fun = np.add, output_surfstat=False):\n \n if filenames.ndim is not 2:\n raise ValueError('Filenames must be a 2-dimensional array.')\n \n for i in range(0, filenames.shape[0]):\n surfaces = np.empty(filenames.shape[1], dtype=np.object)\n for j in range(0, filenames.shape[1]):\n \n # Check whether input is BSPolyData or a filename. \n if isinstance(filenames[i,j], BSPolyData):\n surfaces[j] = filenames[i,j] \n else:\n surfaces[j] = read_surface(filenames[i,j])\n \n # Concatenate second dimension of filenames. \n if j is 0:\n tri = get_cells(surfaces[j]) \n coord = get_points(surfaces[j])\n else:\n tri = np.concatenate((tri, get_cells(surfaces[j]) + coord.shape[0]), axis=0)\n coord = np.concatenate((coord, get_points(surfaces[j])), axis=0)\n \n if i is 0:\n m = 1\n coord_all = coord\n else:\n coord_all = fun(coord_all,coord)\n m = fun(m,1)\n \n coord_all = coord_all / m \n \n if output_surfstat:\n surface = {'tri': np.array(tri) + 1, 'coord': np.array(coord_all).T}\n else:\n surface = build_polydata(coord_all, tri)\n \n return surface",
"def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)",
"def test_file_io(self):\n temp_directory = tempfile.mkdtemp()\n filename = os.path.join(temp_directory, \"test.h5\")\n\n points = np.array([\n [0.1, 0.1, 0.1],\n [1.1, 2.1, 3.1],\n [1.3, 2.2, 3.4]])\n voxel_size = 0.5\n min_corner = Vector3f(-1, -2, -3)\n vg = VoxelGrid(voxel_size, min_corner=min_corner, points=points)\n\n # test writing\n vg.save(filename)\n self.assertTrue(os.path.isfile(filename))\n\n # test reading\n vg2 = VoxelGrid.from_file(filename)\n self.assertAlmostEquals(voxel_size, vg2.voxel_size)\n np.testing.assert_array_almost_equal(vg.min_corner, vg2.min_corner)\n shutil.rmtree(temp_directory)",
"def py_SurfStatInflate(surf, w=0.5, spherefile=None):\n \n v = surf['coord'].shape[1]\n \n if v <= 81924:\n # MATLAB RAPPING FOR *obj FILE READ IN --> has to be changed...\n if spherefile is None:\n spherefile = 'sphere.obj'\n sphere_mat = eng.SurfStatReadSurf(spherefile)\n sphere = {}\n sphere['tri'] = np.array(sphere_mat['tri']) \n sphere['coord'] = np.array(sphere_mat['coord'])\n \n if v == 81924:\n sphere['tri'] = np.concatenate((sphere['tri'],\n sphere['tri']+v), axis=1)\n col1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n col2 = -1 *sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n x = np.concatenate((col1,col2))\n x = x.reshape(1, len(x))\n row2 = row3 = sphere['coord'][1:3,:]\n y = np.concatenate((row2,row3), axis=1)\n sphere['coord'] = np.concatenate((x,y))\n else:\n if surf['coord'][0,:].mean()/abs(surf['coord'][0,:]).mean() <-0.5:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n row1 = -sphere['coord'][0,:] * (sphere['coord'][0,:] < 0) \n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n if spherefile is None:\n spherefile = 'lh.sphere'\n # MATLAB RAPPING FOR *sphere FILE READ IN --> has to be changed...\n sphere_mat = eng.SurfStatReadSurf(spherefile)\n sphere = {}\n sphere['tri'] = np.array(sphere_mat['tri'])\n sphere['coord'] = np.array(sphere_mat['coord'])\n \n if v == 327684:\n sphere['tri'] = np.concatenate((sphere['tri'],\n sphere['tri']+v), axis=1)\n col1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n col2 = sphere['coord'][0,:] * (sphere['coord'][0,:] > 0)\n x = np.concatenate((col1,col2))\n x = x.reshape(1, len(x))\n row2 = row3 = sphere['coord'][1:3,:]\n y = np.concatenate((row2,row3), axis=1)\n sphere['coord'] = np.concatenate((x,y))\n else:\n if surf['coord'][0,:].mean()/abs(surf['coord'][0,:]).mean() <-0.5:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] < 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n else:\n row1 = sphere['coord'][0,:] * (sphere['coord'][0,:] > 0)\n row1 = row1.reshape(1, len(row1))\n sphere['coord'] = np.concatenate((row1,\n sphere['coord'][1:3,:]))\n maxs = surf['coord'].max(1)\n mins = surf['coord'].min(1)\n maxsp = sphere['coord'].max(1)\n minsp = sphere['coord'].min(1)\n surfw = surf\n\n for i in range(0,3): \n surfw['coord'][i,:] = ((sphere['coord'][i,:] - minsp[i]) / \\\n (maxsp[i]-minsp[i]) * (maxs[i]-mins[i]) + mins[i]) * w + \\\n surf['coord'][i,:]*(1-w) \n\n return surfw",
"def test_surface_feature(self):\n\n # Fully valid image\n sf1 = SurfaceFeature(1, 1, 2, 2, 'dummy_wkt_string', 0.5, 'dummy_id')\n sf1.determine_quadkey()\n\n self.assertEqual(sf1.quadkey, '3000000')",
"def test_scatter(X, Y, Z, x_test, y_test, Z_validate, surface_name='MADGE Interpolate Surface', filename='test.html'):\n trace_surface = go.Surface(x=X, y=Y, z=Z, name=surface_name)\n test_scatter = go.Scatter3d(x=x_test, y=y_test, z=Z_validate, mode='markers',\n name='Interpolated Values')\n data = [trace_surface, test_scatter]\n fig = go.Figure(data=data)\n py.offline.plot(fig, filename=filename)",
"def test_voxel(self):\n for m in [g.get_mesh('featuretype.STL'),\n g.trimesh.primitives.Box(),\n g.trimesh.primitives.Sphere()]:\n for pitch in [.1, .1 - g.tol.merge]:\n surface = m.voxelized(pitch=pitch)\n\n # make sure the voxelized pitch is similar to passed\n assert g.np.allclose(surface.pitch, pitch)\n\n for fill_method in ('base', 'orthographic'):\n solid = surface.copy().fill(method=fill_method)\n\n assert len(surface.encoding.dense.shape) == 3\n assert surface.shape == surface.encoding.dense.shape\n assert surface.volume > 0.0\n\n assert isinstance(surface.filled_count, int)\n assert surface.filled_count > 0\n\n box_surface = surface.as_boxes()\n box_solid = solid.as_boxes()\n\n assert isinstance(box_surface, g.trimesh.Trimesh)\n assert abs(box_solid.volume - solid.volume) < g.tol.merge\n\n assert g.trimesh.util.is_shape(\n surface.sparse_indices, (-1, 3))\n assert len(\n solid.sparse_indices) >= len(\n surface.sparse_indices)\n assert solid.sparse_indices.shape == solid.points.shape\n outside = m.bounds[1] + m.scale\n for vox in surface, solid:\n assert vox.sparse_indices.shape == vox.points.shape\n assert g.np.all(vox.is_filled(vox.points))\n assert not vox.is_filled(outside)\n\n try:\n cubes = surface.marching_cubes\n assert cubes.area > 0.0\n except ImportError:\n g.log.info('no skimage, skipping marching cubes test')\n\n g.log.info('Mesh volume was %f, voxelized volume was %f',\n m.volume,\n surface.volume)",
"def visualize(z_in, azimuth=25., elevation=30.,\n thresholds=[0.95, .9, .75, .5, .25, .125], opacities=[1, .9, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],\n# thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],\n fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},\n filename=None, do_axis=True, do_grids=False, draw_projections=True,\n colorbar=False, f_N=2., f_tN=2., figsize=figsize, dpi=300, figpath=figpath, **kwargs):\n z = z_in.copy()\n N_X, N_Y, N_frame = z.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n\n # Normalize the amplitude.\n z /= z.max()\n\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n #from vispy.util.transforms import perspective, translate, rotate\n from vispy.color import Color\n transparent = Color(color='black', alpha=0.)\n import colorsys\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n vol_data = np.rollaxis(np.rollaxis(z, 1), 2)\n# volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)\n center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))\n# volume.transform = center\n# volume.cmap = 'blues'\n\n if draw_projections:\n from vispy.color import Colormap\n cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])\n opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}\n\n energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)#[:, ::-1]\n fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (0, 0, 1))\n tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))\n fourier_xy.transform = tr_xy\n\n energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]\n fourier_xt = scene.visuals.Image(energy_xt, **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (1, 0, 0))\n tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))\n fourier_xt.transform = tr_xt\n\n energy_yt = np.max(z, axis=0)[:, ::-1]\n fourier_yt = scene.visuals.Image(energy_yt, **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))\n fourier_yt.transform = tr_yt\n\n # Generate iso-surfaces at different energy levels\n surfaces = []\n for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):\n surfaces.append(scene.visuals.Isosurface(z, level=threshold,\n# color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),\n color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),\n shading='smooth', parent=view.scene)\n )\n surfaces[-1].transform = center\n\n # Draw a sphere at the origin\n axis = scene.visuals.XYZAxis(parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n\n axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)\n axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)\n axisZ = 
scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)\n\n if do_axis:\n t = {}\n for text in ['f_x', 'f_y', 'f_t']:\n t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6\n t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')\n cam.fov = 48\n cam.scale_factor = N_X * 1.8\n if do_axis: margin = 1.35\n else: margin = 1\n cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))\n view.camera = cam\n\n render_im = canvas.render()\n app.quit()\n if not(filename is None):\n import vispy.io as io\n io.write_png(filename, render_im)\n else:\n return render_im",
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()",
"def test_evi(self):\n scene = Landsat8Scene(self.filenames)\n geoimg = scene.evi()\n self.assertEquals(geoimg.nbands(), 1)\n self.assertTrue('evi' in geoimg.bandnames())",
"def test_uncompressed(mode, size, test_file):\n\n with Image.open(test_file) as im:\n assert im.format == \"DDS\"\n assert im.mode == mode\n assert im.size == size\n\n assert_image_equal_tofile(im, test_file.replace(\".dds\", \".png\"))",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def run_read_output_surf(self, remove = False):\n tcl_name = output_folder + \"/surface_output_\" + str(self.input_pdb_path).split(\"/\")[-1][0:-4] + \"_\" + str(self.current_chain) + \".tcl\"\n run_command = \"play \"+ tcl_name\n evaltcl(run_command)\n output_name = output_folder + \"/surface_output_\" + str(self.input_pdb_path).split(\"/\")[-1][0:-4] + \"_\" + str(self.current_chain)",
"def test_bore_smoke():\n gef = read_bore(os.path.join(BasePath, \"../test_files/example_bore.gef\"))\n axes = plotting.plot_bore(gef)\n assert isinstance(axes, plt.Axes)",
"def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now",
"def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed",
"def test_has_alpha(self):\n image_3d = np.array([[ # One image with shape (1, 2, 3)\n [1, 2, 3],\n [4, 5, 6]\n ]])\n image_4d = np.array([[ # One image with shape (1, 3, 4)\n [1, 2, 3, 4],\n [4, 5, 6, 7],\n [8, 9, 10, 11]\n ]])\n image_5d = np.array([[ # One image with shape (1, 1, 5)\n [1, 2, 3, 4, 5]\n ]])\n self.assertEqual(localHDR.has_alpha(image_3d), False)\n self.assertEqual(localHDR.has_alpha(image_4d), True)\n self.assertEqual(localHDR.has_alpha(image_5d), False)",
"def test_read_from_raster_file(cleantopo_br):\n with mapchete.open(cleantopo_br.dict) as mp:\n tile = mp.config.process_pyramid.tile(5, 0, 0)\n user_process = mapchete.MapcheteProcess(\n tile=tile,\n params=mp.config.params_at_zoom(tile.zoom),\n input=mp.config.get_inputs_for_tile(tile),\n )\n with user_process.open(\"file1\") as f:\n assert f.read().shape == f.read([1]).shape == (1, *f.read(1).shape)",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_to_file_assert_filetype():\n output_file = \"./out.shp\"",
"def plotSurface(surfaceFile, comp=2, points=False, tris=False,\n profile=False, ax=None, annotate=True, norm=None,xscale=1, yscale=1):\n verts,data,tris = load_h5(surfaceFile)\n\n if comp==3: #radial displacements\n z = np.hypot(data[:,:,0], data[:,:,1]).flatten()\n else:\n z = data[:,:,comp].flatten()\n #z = data[:,:,comp].flatten()\n x = verts[:,0] / xscale\n y = verts[:,1] / yscale\n\n #NOTE: need to change grid for linear spacing to work properly\n xi = np.linspace(x.min(), x.max(), x.size)\n yi = np.linspace(y.min(), y.max(), y.size)\n zi = griddata(x,y,z, xi,yi, interp='nn') #'nn'\n\n #NOTE: getting error message here...\n # linear interpolation requires exactly the same limits\n #xi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #yi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #zi = griddata(x,y,z, xi,yi, interp='linear') #'nn'\n #ValueError: output grid must have constant spacing when using interp='linear'\n\n if ax==None:\n plt.figure()\n else:\n ax = plt.axes(ax)\n\n #plt.pcolor(xi, yi, zi, cmap=plt.cm.jet) #Very slow...\n x1, x2, y1, y2 = [x.min(), x.max(), y.min(), y.max()]\n im = plt.imshow(zi, cmap=plt.cm.jet, norm=norm, extent=[x1, x2, y1, y2])\n\n if annotate:\n compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}\n plt.title('{} Displacement'.format(compdict[comp]))\n plt.xlabel('Distance [m]')\n plt.ylabel('Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')\n\n if points:\n plt.plot(x,y,'k.')\n\n if type(tris) is np.ndarray:\n plt.triplot(x, y, tris, 'k-')\n\n # EW profile line through the x-axis\n if profile:\n plt.axhline(linewidth=2, color='r')\n Zi = zi[x.size/2,:]\n plt.figure()\n plt.plot(xi, Zi, 'b.-')\n plt.title('Profile')\n plt.xlabel('Distance [m]')\n plt.ylabel('{} Displacement [m]'.format(compdict[comp]))\n\n return im",
"def test_grdimage_file():\n fig = Figure()\n fig.grdimage(\n \"@earth_relief_01d_g\",\n cmap=\"ocean\",\n region=[-180, 180, -70, 70],\n projection=\"W0/10i\",\n shading=True,\n )\n return fig",
"def test_sanity_dxt3():\n\n with Image.open(TEST_FILE_DXT3) as im:\n im.load()\n\n assert im.format == \"DDS\"\n assert im.mode == \"RGBA\"\n assert im.size == (256, 256)\n\n assert_image_equal_tofile(im, TEST_FILE_DXT3.replace(\".dds\", \".png\"))"
] |
[
"0.6641745",
"0.5991208",
"0.5901561",
"0.5861314",
"0.5859329",
"0.5802738",
"0.56625646",
"0.5633906",
"0.56201524",
"0.56140715",
"0.56027144",
"0.55701965",
"0.55435073",
"0.55309486",
"0.55102223",
"0.548895",
"0.54658955",
"0.5429626",
"0.538379",
"0.5362354",
"0.53097713",
"0.53035",
"0.5276363",
"0.5263682",
"0.5261737",
"0.5261737",
"0.5251877",
"0.52419126",
"0.5238595",
"0.52234674"
] |
0.6341031
|
1
|
Test IO of morphometry data file (e.g. curvature).
|
def test_morph_data():
curv_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "curv"))
curv = read_morph_data(curv_path)
assert_true(-1.0 < curv.min() < 0)
assert_true(0 < curv.max() < 1.0)
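Note: this and the FreeSurfer IO tests in the following rows appear to exercise the readers in nibabel.freesurfer; pjoin, read_morph_data and data_path are assumed to be os.path.join, nibabel's reader and a FreeSurfer subject directory — none of them are defined in the snippet itself. A minimal standalone sketch under those assumptions:

# Sketch only: nibabel and the subject path are assumptions, not given above.
import os
import numpy as np
from nibabel.freesurfer import read_morph_data

subject_dir = "/path/to/freesurfer/subject"   # hypothetical location
curv = read_morph_data(os.path.join(subject_dir, "surf", "lh.curv"))
assert isinstance(curv, np.ndarray)           # one curvature value per vertex
print(curv.min(), curv.max())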
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_open_file(self):\n\t\tposition, potential = schrodinger.open_file('potential_energy.dat')\n\t\tself.assertEqual(position, [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477])\n\t\tself.assertEqual(potential, [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0])",
"def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)",
"def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")",
"def test_read(self):\n for line in TESTIMAGES.split('\\n'):\n vals = line.strip().split()\n name = vals[0]\n logger.debug(\"Testing file %s\" % name)\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = raxisimage()\n obj.read(os.path.join(os.path.dirname(self.mar), name))\n\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin [%s,%s]\" % (mini, obj.getmin()))\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax [%s,%s]\" % (maxi, obj.getmax()))\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean [%s,%s]\" % (mean, obj.getmean()))\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev [%s,%s]\" % (stddev, obj.getstddev()))\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")\n self.assertNotEqual(obj.dim1, obj.dim2, \"dim2!=dim1\")",
"def test_file_io(self):\n temp_directory = tempfile.mkdtemp()\n filename = os.path.join(temp_directory, \"test.h5\")\n\n points = np.array([\n [0.1, 0.1, 0.1],\n [1.1, 2.1, 3.1],\n [1.3, 2.2, 3.4]])\n voxel_size = 0.5\n min_corner = Vector3f(-1, -2, -3)\n vg = VoxelGrid(voxel_size, min_corner=min_corner, points=points)\n\n # test writing\n vg.save(filename)\n self.assertTrue(os.path.isfile(filename))\n\n # test reading\n vg2 = VoxelGrid.from_file(filename)\n self.assertAlmostEquals(voxel_size, vg2.voxel_size)\n np.testing.assert_array_almost_equal(vg.min_corner, vg2.min_corner)\n shutil.rmtree(temp_directory)",
"def test_create_from_file(self):\n # TODO: Expand test to both openeye and rdkit toolkits\n filename = get_data_file_path(\"molecules/toluene.mol2\")\n\n molecule1 = Molecule(filename, allow_undefined_stereo=True)\n with open(filename, \"r\") as infile:\n molecule2 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule1 == molecule2\n\n import gzip\n\n with gzip.GzipFile(filename + \".gz\", \"r\") as infile:\n molecule3 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule3 == molecule1\n\n # Ensure that attempting to initialize a single Molecule from a file\n # containing multiple molecules raises a ValueError\n filename = get_data_file_path(\"molecules/butane_multi.sdf\")\n\n with pytest.raises(\n ValueError,\n match=\"Specified file or file-like.*exactly one molecule\",\n ):\n Molecule(filename, allow_undefined_stereo=True)",
"def test_good_load(self):\n self.r0.save_to_file([self.r0, self.r1])\n objs = self.r0.load_from_file()\n self.assertEqual(str(objs[0]), '[Rectangle] (1) 0/0 - 2/3')\n self.assertEqual(str(objs[1]), '[Rectangle] (2) 0/0 - 4/6')",
"def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)",
"def test_create_from_file(self):\n # TODO: Expand test to both openeye and rdkit toolkits\n filename = get_data_file_path(\"molecules/toluene.mol2\")\n\n molecule1 = Molecule(filename, allow_undefined_stereo=True)\n with open(filename, \"r\") as infile:\n molecule2 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule1 == molecule2\n\n import gzip\n\n with gzip.GzipFile(filename + \".gz\", \"r\") as infile:\n molecule3 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule3 == molecule1\n\n # Ensure that attempting to initialize a single Molecule from a file\n # containing multiple molecules raises a ValueError\n with pytest.raises(ValueError) as exc_info:\n filename = get_data_file_path(\"molecules/zinc-subset-tripos.mol2.gz\")\n molecule = Molecule(filename, allow_undefined_stereo=True)",
"def test_write_expected(self):\n\n filename = tempfile.mkstemp()[1]\n writer_method = neuroml.writers.ArrayMorphWriter.write\n writer_method(self.big_arraymorph,filename)\n\n loader_method = neuroml.loaders.ArrayMorphLoader.load\n doc = loader_method(filename)\n array_morph = doc.morphology[0]\n\n connectivity_equal = np.testing.assert_array_equal(array_morph.connectivity,self.big_arraymorph.connectivity)\n physical_masks_equal = np.testing.assert_array_equal(array_morph.physical_mask,self.big_arraymorph.physical_mask)\n vertices_equal = np.testing.assert_array_equal(array_morph.vertices,self.big_arraymorph.vertices)\n\n\n self.assertEqual(connectivity_equal,None) #None when equal\n self.assertEqual(physical_masks_equal,None) #None when equal\n self.assertEqual(vertices_equal,None) #None when equal ",
"def test_readfile(self):\n fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \\\n HEN_FILE_EXTENSION\n command = \"{0}\".format(fname)\n\n hen.io.main(command.split())",
"def test_fileobj(self, ext, dtype):\n sample_rate = 16000\n num_frames = 3 * sample_rate\n num_channels = 2\n with self.assertRaisesRegex(ValueError, \"SoX backend does not support reading\"):\n self._query_fileobj(ext, dtype, sample_rate, num_channels, num_frames)",
"def test_exposure(self):\n lcname = os.path.join(self.datadir,\n 'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)\n ufname = os.path.join(self.datadir, 'monol_testA_uf.evt')\n command = \"{0} {1}\".format(lcname, ufname)\n\n hen.exposure.main(command.split())\n fname = os.path.join(self.datadir,\n 'monol_testA_E3-50_lccorr' + HEN_FILE_EXTENSION)\n assert os.path.exists(fname)\n ftype, contents = hen.io.get_file_type(fname)\n\n assert isinstance(contents, Lightcurve)\n assert hasattr(contents, 'expo')",
"def test_write_OPK_to_shp_file(self):\r\n arr_oris = [{'altitude': 53.534337, 'id': 'IMG_1468832894.185000000.jpg', 'easting': 657739.197431,\r\n 'pitch': -172.350586, 'heading': -75.622522, 'roll': -40.654833, 'northing': 6860690.284637}]\r\n\r\n # on export le shapefile a partir des donnees pour le tests\r\n write_OPK_to_shp_file(arr_oris,\r\n self.test_shapefile,\r\n b_export_view_dir=False)\r\n # on tests si la methode a exporte les fichiers\r\n # url: http://stackoverflow.com/questions/82831/how-to-check-whether-a-file-exists-using-python\r\n self.assertTrue(exists(self.test_shapefile))\r\n\r\n # lecture d'un shapefile\r\n r = shapefile.Reader(self.test_shapefile)\r\n # geometries\r\n shapes = r.shapes()\r\n # extraction de la listes des points\r\n list_points = shapes[0].points\r\n # 1 point definit dans le shapefile\r\n self.assertEqual(len(shapes), 1)\r\n # on tests le type de la shape stockee\r\n # url: http://www.esri.com/library/whitepapers/pdfs/shapefile.pdf\r\n # type == 1 => Shape type=Point\r\n self.assertEqual(shapes[0].shapeType, 1)\r\n # on utilise extract_center_dict_ori (qui est doctestee)\r\n self._raise_assert_on_np_is_close_all(list_points[0], extract_center_dict_ori(arr_oris[0])[:2])",
"def test_get_thermo_data(self):\n output_directory = os.path.join(self.mop4.settings.fileStore, '..', '..')\n self.mop4.set_default_output_directory(output_directory)\n self.gauss3.set_default_output_directory(output_directory)\n self.molpro1.set_default_output_directory(output_directory)\n\n mol = Molecule().from_smiles('C1=CC=C2C=CC=CC2=C1')\n\n with self.assertRaises(Exception):\n self.mop4.get_thermo_data(mol)\n self.gauss3.get_thermo_data(mol)\n self.molpro1.get_thermo_data(mol)",
"def testFileInRead(self, mockPath):\n mockPath.return_value = 'bananaphone.ccc'\n self.node = cdl_convert.ColorCollection(input_file='mybestfile.ccc')\n\n mockPath.assert_called_once_with('mybestfile.ccc')\n\n self.assertEqual(\n 'bananaphone.ccc',\n self.node.file_in\n )",
"def test_read_image(self):\n pass",
"def test_geometry():\n surf_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"inflated\"))\n coords, faces = read_geometry(surf_path)\n assert_equal(0, faces.min())\n assert_equal(coords.shape[0], faces.max() + 1)\n\n # Test quad with sphere\n surf_path = pjoin(data_path, \"surf\", \"%s.%s\" % (\"lh\", \"sphere\"))\n coords, faces = read_geometry(surf_path)\n assert_equal(0, faces.min())\n assert_equal(coords.shape[0], faces.max() + 1)",
"def verify_file_content(file_path: pathlib.Path, model: OscalBaseModel):\n model.oscal_read(file_path)",
"def test_whole_file(self):\n import soundfile\n import numpy as np\n\n data_file_path = \"sample-data/flacformats.d2\"\n expected_format = \"FLAC\"\n expected_subtype = \"PCM_24\"\n\n # Read the file using standard file I/O\n sf1 = soundfile.SoundFile(data_file_path)\n self.assertEqual(sf1.format, expected_format)\n self.assertEqual(sf1.subtype, expected_subtype)\n data1 = sf1.read()\n\n # Read the file using HTTP\n with open(data_file_path, \"rb\") as f:\n file_content = {\"/foo.dat\": f.read()}\n with DummyHTTPServer(file_content) as server:\n url = server.url(\"/foo.dat\")\n file2 = wfdb.io._url.openurl(url, \"rb\")\n sf2 = soundfile.SoundFile(file2)\n self.assertEqual(sf2.format, expected_format)\n self.assertEqual(sf2.subtype, expected_subtype)\n data2 = sf2.read()\n\n # Check that results are equal\n np.testing.assert_array_equal(data1, data2)",
"def test_to_file_assert_filetype():\n output_file = \"./out.shp\"",
"def test_read_mol_input():\n # good input\n read_mol_input(os.path.join(TEST_DIR, \"example_mol_input_file.txt\"))\n # good input with extra spaces\n read_mol_input(os.path.join(TEST_DIR, \"example2_mol_input_file.txt\"))\n # no such file error\n assert_raises(FileNotFoundError, read_mol_input, 'no-such-file')\n # qcm appears twice\n assert_raises(ValueError, read_mol_input, os.path.join(TEST_DIR, \"bad1_mol_input_file.txt\"))\n # missing struct type\n assert_raises(ValueError, read_mol_input, os.path.join(TEST_DIR, \"bad2_mol_input_file.txt\"))",
"def test_readfile_fits(self):\n fitsname = os.path.join(self.datadir, 'monol_testA.evt')\n command = \"{0}\".format(fitsname)\n\n hen.io.main(command.split())",
"def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r",
"def test_extract_round_invalid():\n file = Path(\"src/core/tests/files/rectangle.STL\").absolute()\n assert \"error\" in extract(file=file, sphere_type=2)",
"def test_read_lxyr(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n # gt_file = os.path.join(cwd, 'test_files/test_gt.lxyr')\n # ground_truths = read_lxyr(gt_file)\n test_dir = os.path.join(cwd, 'test_files/')\n ground_truths = read_lxyr(test_dir, 'test_gt')\n # print ground_truths\n self.assertTrue(any(\n gt for gt in ground_truths\n if gt.x == 553 and gt.y == 132\n and gt.radius == 16.64 and gt.class_value == 3))\n self.assertTrue(any(\n gt for gt in ground_truths\n if gt.x == 119 and gt.y == 631\n and gt.radius == 15.0 and gt.class_value == 4))",
"def test_io_import_fmi_pgm_geodata(variable, expected, tolerance):\n root_path = pysteps.rcparams.data_sources[\"fmi\"][\"root_path\"]\n filename = os.path.join(root_path, \"20160928\",\n \"201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz\")\n metadata = pysteps.io.importers._import_fmi_pgm_metadata(filename, gzipped=True)\n geodata = pysteps.io.importers._import_fmi_pgm_geodata(metadata)\n\n smart_assert(geodata[variable], expected, tolerance)",
"def test_irr_read(irregular_written_data):\n\n fp, written = irregular_written_data\n with openEDF(fp) as reader:\n arr = reader.read(0)\n #imprecision due to 2-byte conversion so tolerance set to 1 unit\n assert np.allclose(written, arr, equal_nan=True, atol=1)",
"def test_image(self):\r\n self.testdata = open(TESTDATA_FILENAME).read()",
"def _test(self, file_name):\n data = bob.io.base.load(file_name)\n assert (_data == data).all()"
] |
[
"0.6433859",
"0.62288296",
"0.59984636",
"0.59970236",
"0.59490573",
"0.5776016",
"0.572016",
"0.56419414",
"0.563778",
"0.56109965",
"0.5610844",
"0.56056154",
"0.5592752",
"0.55828625",
"0.55790675",
"0.5567408",
"0.5532343",
"0.5524342",
"0.5518242",
"0.55145156",
"0.5513294",
"0.55112875",
"0.5499942",
"0.5490039",
"0.54879194",
"0.54873425",
"0.54827946",
"0.5474199",
"0.54694515",
"0.5466654"
] |
0.71642715
|
0
|
Test IO of .annot
|
def test_annot():
annots = ['aparc', 'aparc.a2005s']
for a in annots:
annot_path = pjoin(data_path, "label", "%s.%s.annot" % ("lh", a))
labels, ctab, names = read_annot(annot_path)
assert_true(labels.shape == (163842, ))
assert_true(ctab.shape == (len(names), 5))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_format_of_annotation_in_file(self):\n if not self.is_span_valid():\n sys.exit()",
"def test_st_annotation00101m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m1.xsd\",\n instance=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m1_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_annotation00101m1_positive_378(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Notation/annotation/annotation00101m/annotation00101m1.xsd\",\n instance=\"sunData/Notation/annotation/annotation00101m/annotation00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_st_annotation00101m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m2.xsd\",\n instance=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m2_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_annotations00101m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Schema/annotations/annotations00101m/annotations00101m1.xsd\",\n instance=\"sunData/Schema/annotations/annotations00101m/annotations00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)",
"def test_sequence_annotate(self):\n self.t(\"1,2 annotate note\")\n code, out, err = self.t(\"_get 1.annotations.1.description 2.annotations.1.description\")\n self.assertEqual(\"note note\\n\", out)",
"def test_annotate_image(self):\n im_path = test_im_dir / \"square-im-1.png\"\n image = li.Image(im_path)\n\n label = li.Label(\"Test Label\", (0.50, 0.50))\n image.annotate(label)\n annotated_im = image.data\n\n # Ensure the shape is retained\n shape_expected = image.data_original.shape\n shape_test = annotated_im.shape\n self.assertEqual(\n shape_test, shape_expected, msg=\"shape not retained after annotation\"\n )\n\n if PLOT:\n imsave(\"/tmp/annotate_image.png\", annotated_im, check_contrast=False)",
"def has_annotations(filepath):\n return filepath.endswith('.ll') and '[#uses=' in open(filepath).read()",
"def test_annotations00101m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Schema/annotations/annotations00101m/annotations00101m2.xsd\",\n instance=\"sunData/Schema/annotations/annotations00101m/annotations00101m2_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ",
"def test_3():\n results = base_tests(long_anno=1)\n assert results[0][\"Consequence\"] == \"To Long Annotation\"",
"def test_annotation00101m1_positive_170(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/CType/annotation/annotation00101m/annotation00101m1.xsd\",\n instance=\"sunData/CType/annotation/annotation00101m/annotation00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_annotations00101m5_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Schema/annotations/annotations00101m/annotations00101m5.xsd\",\n instance=\"sunData/Schema/annotations/annotations00101m/annotations00101m5_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_exac_annotator_1(test_scheme, setup_annotator, get_test_file,\n get_test_vcf_record, get_empty_maf_record):\n vcf_path = get_test_file('fake_exac.vcf.gz')\n annotator = setup_annotator(test_scheme, source=vcf_path) \n\n vcf_record = get_test_vcf_record(\n chrom='chr1', pos=10, stop=10, ref='C', \n alleles=('C', 'G'), alts=('G',))\n expected = {\n 'nontcga_ExAC_AF': 40 / float(70),\n 'nontcga_ExAC_AF_Adj': 10 / float(200),\n 'nontcga_ExAC_AF_AFR': 10 / float(10),\n 'nontcga_ExAC_AF_AMR': 5 / float(10),\n 'nontcga_ExAC_AF_EAS': 5 / float(10),\n 'nontcga_ExAC_AF_FIN': 5 / float(10),\n 'nontcga_ExAC_AF_NFE': 5 / float(10),\n 'nontcga_ExAC_AF_OTH': 5 / float(10),\n 'nontcga_ExAC_AF_SAS': 5 / float(10)\n }\n maf_record = annotator.annotate(get_empty_maf_record, vcf_record)\n \n for k in expected:\n assert maf_record[k].value == expected[k]",
"def test_annotations00101m6_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Schema/annotations/annotations00101m/annotations00101m6.xsd\",\n instance=\"sunData/Schema/annotations/annotations00101m/annotations00101m6_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_annotations00101m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Schema/annotations/annotations00101m/annotations00101m3.xsd\",\n instance=\"sunData/Schema/annotations/annotations00101m/annotations00101m3_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_annotations00101m4_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Schema/annotations/annotations00101m/annotations00101m4.xsd\",\n instance=\"sunData/Schema/annotations/annotations00101m/annotations00101m4_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_exac_annotator_3(test_scheme, setup_annotator, get_test_file,\n get_test_vcf_record, get_empty_maf_record):\n vcf_path = get_test_file('fake_exac.vcf.gz')\n annotator = setup_annotator(test_scheme, source=vcf_path) \n\n vcf_record = get_test_vcf_record(\n chrom='chr2', pos=10, stop=13, ref='ACTT', \n alleles=('ACTT', 'AG'), alts=('AG',))\n expected = {\n 'nontcga_ExAC_AF': 1 / float(70),\n 'nontcga_ExAC_AF_Adj': 1 / float(200),\n 'nontcga_ExAC_AF_AFR': 1 / float(10),\n 'nontcga_ExAC_AF_AMR': 0 / float(10),\n 'nontcga_ExAC_AF_EAS': 0 / float(10),\n 'nontcga_ExAC_AF_FIN': 0 / float(10),\n 'nontcga_ExAC_AF_NFE': 0 / float(10),\n 'nontcga_ExAC_AF_OTH': 0 / float(10),\n 'nontcga_ExAC_AF_SAS': 0 / float(10)\n }\n maf_record = annotator.annotate(get_empty_maf_record, vcf_record)\n for k in expected:\n assert maf_record[k].value == expected[k]",
"def test_annotation00101m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/CType/annotation/annotation00101m/annotation00101m2.xsd\",\n instance=\"sunData/CType/annotation/annotation00101m/annotation00101m2_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def isOk(annots):\n if annots == []:\n return True\n for a in annots:\n for label in a.labels:\n if (label != 'hasOkCopies' and\n label != 'hasBadCopies' and\n not label.startswith('count_')):\n return False\n return True",
"def test_exac_annotator_4(test_scheme, setup_annotator, get_test_file,\n get_test_vcf_record, get_empty_maf_record):\n vcf_path = get_test_file('fake_exac.vcf.gz')\n annotator = setup_annotator(test_scheme, source=vcf_path) \n\n vcf_record = get_test_vcf_record(\n chrom='chr1', pos=10, stop=10, ref='C', \n alleles=('C', 'T'), alts=('T',))\n expected = {\n 'nontcga_ExAC_AF': None, \n 'nontcga_ExAC_AF_Adj': None, \n 'nontcga_ExAC_AF_AFR': None, \n 'nontcga_ExAC_AF_AMR': None, \n 'nontcga_ExAC_AF_EAS': None, \n 'nontcga_ExAC_AF_FIN': None, \n 'nontcga_ExAC_AF_NFE': None, \n 'nontcga_ExAC_AF_OTH': None, \n 'nontcga_ExAC_AF_SAS': None\n }\n maf_record = annotator.annotate(get_empty_maf_record, vcf_record)\n \n for k in expected:\n assert maf_record[k].value == expected[k]",
"def test_annotation00101m1_positive_362(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/MGroup/annotation/annotation00101m/annotation00101m1.xsd\",\n instance=\"sunData/MGroup/annotation/annotation00101m/annotation00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_loading_document(self):",
"def do_parse_annotations(xmls_path_in, xml_path_out):\n label = \"puzzle piece\"\n new_xml = \"<?xml version='1.0' encoding='ISO-8859-1'?>\"\n new_xml += \"<dataset>\\n<name>Puzzle dataset</name>\\n<comment>Created by imglab tool.</comment>\\n\"\n new_xml += \"<images>\\n\"\n for i, annot in enumerate(glob.glob(xmls_path_in + \"/*.xml\")):\n tree = ET.parse(annot)\n root = tree.getroot()\n left = root[6][4][0].text\n top = root[6][4][1].text\n width = 70#str(int(root[6][4][2].text) - int(left))\n height = 70#str(int(root[6][4][3].text) - int(top))\n new_xml += \"{}<image file='{}jpg'>\\n\".format(1 * ' ', annot[:-3])\n new_xml += \"{}<box top='{}' left='{}' width='{}' height='{}'>\\n\".format(2 * ' ', top, left, width, height)\n new_xml += \"{}<label>{}</label>\\n{}</box>\\n'{}</image>\\n\".format(3 * ' ', label, 2 * ' ', 1 * ' ')\n\n new_xml += \"</images>\\n</dataset>\"\n out_file = open(xml_path_out, \"w\")\n out_file.write(new_xml)\n out_file.close()",
"def write_annotation(self, ann_file, img_path, new_img_name):\n if self.type == \"imagenet\":\n label = self.in_annotations[img_path]\n logger.debug(f\"Img {img_path}, imagenet label {label}\")\n ann_file.write(str(label) + \"\\n\")\n elif self.type == \"coco\":\n ann_file.write(\"detection_results {\\n\")\n for obj in self.in_annotations[img_path].keys():\n ann_file.write(\" objects {\\n\")\n ann_file.write(f\" class_id: {self.in_annotations[img_path][obj]['label']}\\n\")\n ann_file.write(\" bounding_box {\\n\")\n ann_file.write(f\" normalized_top: {self.in_annotations[img_path][obj]['normalized_bbox'][0]}\\n\")\n ann_file.write(f\" normalized_bottom: {self.in_annotations[img_path][obj]['normalized_bbox'][1]}\\n\")\n ann_file.write(f\" normalized_left: {self.in_annotations[img_path][obj]['normalized_bbox'][2]}\\n\")\n ann_file.write(f\" normalized_right: {self.in_annotations[img_path][obj]['normalized_bbox'][3]}\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(f' image_name: \"{new_img_name}\"\\n')\n ann_file.write(f' image_id: {int(new_img_name.split(\".\")[0])}\\n')\n ann_file.write(\"}\\n\")",
"def test_annotation00101m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/annotation/annotation00101m/annotation00101m3.xsd\",\n instance=\"sunData/ElemDecl/annotation/annotation00101m/annotation00101m3_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def test_ad_annotation00101m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/AttrDecl/AD_annotation/AD_annotation00101m/AD_annotation00101m1.xsd\",\n instance=\"sunData/AttrDecl/AD_annotation/AD_annotation00101m/AD_annotation00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )",
"def main(gt_dir='./data/Oxford_Robot_ICCV19/anno', devkit_dir = './dataset/robot_list/'):\n with open(join(devkit_dir, 'info.json'), 'r') as fp:\n info = json.load(fp)\n image_path_list = join(devkit_dir, 'val.txt')\n label_path_list = join(devkit_dir, 'label.txt')\n mapping = np.array(info['label2train'], dtype=np.int)\n gt_imgs = open(label_path_list, 'r').read().splitlines()\n gt_imgs = [join(gt_dir, x) for x in gt_imgs]\n\n for ind in range(len(gt_imgs)):\n label = np.array(Image.open(gt_imgs[ind]))\n label = label_mapping(label, mapping)\n label = label[:,:,0].astype(np.uint8)\n name_tmp = gt_imgs[ind].replace('anno','anno_color')\n save([label, name_tmp])\n \n return",
"def test_annotation00101m13_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/MGroup/annotation/annotation00101m/annotation00101m13.xsd\",\n instance=\"sunData/MGroup/annotation/annotation00101m/annotation00101m13_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )"
] |
[
"0.6033968",
"0.59515923",
"0.5841626",
"0.58330965",
"0.5818863",
"0.5812621",
"0.5801064",
"0.5762653",
"0.57568604",
"0.5750474",
"0.5732317",
"0.5732033",
"0.5713418",
"0.570204",
"0.56920904",
"0.5657469",
"0.56272537",
"0.5614729",
"0.5610254",
"0.5597071",
"0.5585642",
"0.5584765",
"0.55810654",
"0.5568554",
"0.5478576",
"0.5475865",
"0.5473496",
"0.5467528",
"0.5464405",
"0.54629225"
] |
0.7134083
|
0
|
Test IO of .label
|
def test_label():
label_path = pjoin(data_path, "label", "lh.BA1.label")
label = read_label(label_path)
# XXX : test more
assert_true(np.all(label > 0))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, None)\n _G_label_3\n \"\"\"))",
"def test_labels(self):\n self.compliance_tester.test_labels(self.oi)",
"def is_label(self, label: str) -> bool:\n return label in self.is_label_of",
"def _is_label(self) -> bool:\n return self.lines[self.counter].startswith(\"(\") and self.lines[\n self.counter\n ].endswith(\")\")",
"def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False",
"def test_label(self):\n try:\n t = self.OntTerm(label='diffuse')\n raise AssertionError(f'should fail {t!r}')\n except TypeError:\n pass",
"def tests_ti_file_get_label(self):\n super().indicator_get_label()",
"def has_label(self):\n return self.has_udev_property('ID_FS_LABEL_ENC')",
"def has_label(self, label):\n return label in self.get_labels()",
"def has_label(self, label):\n return label == self.label",
"def test_issue_get_label(self):\n pass",
"def load_label(self, pr):\n return",
"def label(self):\r\n raise NotImplementedError",
"def read_label(self):\r\n # label = str(self.parse_binary())#!!BAD\r\n label = ''\r\n while True:\r\n c = self.eat_char()\r\n if c=='n':\r\n #terminal char\r\n break\r\n else:\r\n label += c\r\n\r\n self.log += \"'\" + label + \"'\"\r\n return label",
"def has_label(self, label):\n\t\t\treturn label in self.labels",
"def tests_ti_file_add_label(self):\n super().indicator_add_label()",
"def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())",
"def test_issue_create_label(self):\n pass",
"def _check_for_labels(self):\n check = True\n if 'labels' not in self.mapper:\n check = False\n return check",
"def getLabel(*args):",
"def getLabel(*args):",
"def getLabel(*args):",
"def has_label(self, label):\n\t\treturn label in self.labels",
"def has_label(self):\n return self.label is not None",
"def checkLabel(label):\n\n label = str(label)\n if not label:\n raise ValueError('label cannot be empty string')\n\n label = str(label)\n\n if not label:\n raise ValueError('label cannot be empty string')\n\n if not label[0].isalpha():\n raise ValueError('label must start with a letter')\n\n if not (''.join(label.split('_'))).isalnum():\n raise ValueError('label may contain alphanumeric characters and '\n 'underscore, {0} is not valid'.format(label))\n\n if isReserved(label):\n raise ValueError('{0} is a reserved word and cannot be used '\n 'as a label'.format(repr(label)))\n\n if label in READONLY:\n raise AttributeError('{0} is read-only'.format(label))\n\n return label",
"def _get_label(cls, file_name):\n if cls == \"neg\":\n return \"0\"\n else:\n return \"1\"\n # reg = _REGEX_\n # rmtch = reg.match(file_name)\n # if rmtch:\n # return rmtch.groupdict()[\"label\"]\n # else:\n # return \"unknown_positive\"",
"def check_labels (points, labels, fun):\n your_labels = fun (points)\n return (labels == your_labels)",
"def want_label(self, op):\n return self.want_line(r'\\s*\\S*(%s)\\S*\\:.*' % (op))",
"def click_on_label(step, label):\r\n\r\n with AssertContextManager(step):\r\n elem = world.browser.find_element_by_xpath(str(\r\n '//label[normalize-space(text()) = \"%s\"]' % label))\r\n elem.click()",
"def test_labels(ruler: SpaczzRuler) -> None:\n assert all(\n [label in ruler.labels for label in [\"GPE\", \"STREET\", \"DRUG\", \"NAME\", \"BAND\"]]\n )\n assert len(ruler.labels) == 5"
] |
[
"0.698072",
"0.6907531",
"0.68103695",
"0.68100667",
"0.6782609",
"0.67555594",
"0.67435974",
"0.64980084",
"0.64621615",
"0.6399174",
"0.6392844",
"0.62826055",
"0.62643975",
"0.6254232",
"0.62371147",
"0.6222561",
"0.6195714",
"0.61815184",
"0.617836",
"0.6169217",
"0.6169217",
"0.6169217",
"0.61624116",
"0.6157906",
"0.614958",
"0.6144681",
"0.6126059",
"0.6081833",
"0.6049334",
"0.6049332"
] |
0.7863483
|
0
|
Returns signature for a given message, z
|
def sign(self, msg):
z = int.from_bytes(helper.hash256(msg), "big")
k = self.deterministic_k(z)
k_inv = pow(k, N-2, N)
r = (k*G).x.num
s = (z + r * self.secret) * k_inv % N
if s > N/2:
s = N - s
return Signature(r, s)
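The signing code above looks like low-s ECDSA over secp256k1, where N, G and Signature are assumed to be the curve order, the generator point and a plain (r, s) container from the surrounding library. For context, a hedged sketch of the matching verification step, reusing only those assumed names:

# Sketch only: N, G, the point arithmetic and the .x.num attribute are
# assumptions carried over from the signing snippet, not defined here.
def verify(pub_point, z, sig):
    s_inv = pow(sig.s, N - 2, N)       # modular inverse (N is prime, Fermat)
    u = z * s_inv % N
    v = sig.r * s_inv % N
    total = u * G + v * pub_point      # pub_point is the signer's public key
    return total.x.num == sig.r        # valid iff the x-coordinate matches r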
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_signature_for_message(message, filename='private.key'):\n message = dict(sorted(message.items()))\n message = json.dumps(message)\n\n private_key_path = os.path.join('keys', filename)\n with open(private_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n h = SHA.new(message.encode()).digest()\n signature = private_key.sign(h, '')\n\n return base64.b64encode(bytes(str(signature[0]).encode()))",
"def sign(self, message):\n return Signature(self._sk.sign(message))",
"def compute_signature(msg):\n hashkey = memcache.Client().get('CURL_TEST_SERVER_HASHKEY')\n h = hmac.new(hashkey, msg, hashlib.sha1)\n signature = urllib.quote(base64.b64encode(h.digest()))\n return signature",
"def sign_message(self, message):\n if self.private_key:\n if isinstance(message, str):\n utf8 = message.encode('utf-8')\n else:\n raise TypeError(\"message must be a string.\")\n signature = self.private_key.sign(utf8).to_base64()\n return signature\n else:\n return None",
"def get_signature(self, message):\n signature = calc_signature(message, self.negotiate_flags,\n self.outgoing_signing_key,\n self.outgoing_seq_num, self.outgoing_handle)\n self.outgoing_seq_num += 1\n\n return signature.get_data()",
"def signmessage(self, address, message):\n return self.proxy.signmessage(address, message)",
"def sign(self, message, randombytes=urandom):\r\n int_header = 0x30 + logn[self.n]\r\n header = int_header.to_bytes(1, \"little\")\r\n\r\n salt = randombytes(SALT_LEN)\r\n hashed = self.hash_to_point(message, salt)\r\n\r\n # We repeat the signing procedure until we find a signature that is\r\n # short enough (both the Euclidean norm and the bytelength)\r\n '''\r\n print(\"---------Inside sign----------\")\r\n '''\r\n while(1):\r\n if (randombytes == urandom):\r\n s = self.sample_preimage(hashed)\r\n '''\r\n print(\"s: \", s)\r\n '''\r\n else:\r\n seed = randombytes(SEED_LEN)\r\n s = self.sample_preimage(hashed, seed=seed)\r\n norm_sign = sum(coef ** 2 for coef in s[0])\r\n norm_sign += sum(coef ** 2 for coef in s[1])\r\n # Check the Euclidean norm\r\n if norm_sign <= self.signature_bound:\r\n\r\n enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)\r\n # Check that the encoding is valid (sometimes it fails)\r\n if (enc_s is not False):\r\n return header + salt + enc_s\r\n '''\r\n else:\r\n print(\"-------------INVALID encoding---------------\")\r\n\r\n else:\r\n print(\"-------------NOT within signature bound---------------\")\r\n '''",
"def sign(self, message):\n\n # if not already a byte string turn it to making sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature",
"def sign(self, message):\n\n assert len(message) == 32\n assert self.sec is not None\n r, s = do_ecdsa_sign(self.G, self.sec, message, self.optim)\n r0, s0 = r.binary(), s.binary()\n assert len(r0) <= 32 and len(s0) <= 32\n sig = pack(\"H32sH32s\", len(r0), r0, len(s0), s0)\n return sig",
"def sign(message, G, d, timing_list = None):\r\n\r\n global random\r\n\r\n if random is None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n k = random(128)\r\n\r\n if timing_list == None:\r\n return _sign(long(sha256(message).hexdigest(), 16), G, d, k)\r\n\r\n begin_time = clock()\r\n signature = _sign(message, G, d, k)\r\n timing_list.append(clock() - begin_time)\r\n\r\n return signature",
"def make_signature(secret: VersionedSecret, message: str, max_age: datetime.timedelta) -> bytes:\n version = 1\n expiration = int(time.time() + max_age.total_seconds())\n header = _HEADER_FORMAT.pack(version, expiration)\n digest = _compute_digest(secret.current, header, message)\n return base64.urlsafe_b64encode(header + digest)",
"def daily_signature(key, message):\n byte_key = binascii.unhexlify(key)\n message = message.encode()\n return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()",
"def get_signature(self, local_json: Dict) -> str:\n return get_signature(self._private_key, self._construct_signature_str(local_json))",
"def get_signature(self, signature_name=None):\n return None, None",
"def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id",
"def gpgSignMessage(message):\n sig = gpg.sign(message, default_key=primary, passphrase=passphrase)\n if sig and sig.data:\n return sig.data",
"def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))",
"def signature(request) -> str:\n return get_test_data(request, __name__, \"signature\", \"r\")",
"def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig",
"def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q-1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)",
"def signature(self) -> str:\n return self[\"Sns\"][\"Signature\"]",
"def signature(self, p_int): # real signature unknown; restored from __doc__\n return \"\"",
"def get_signature(self):\n return \" \".join(self.segments[-1].unixtext.replace(\n u\"\\n\", \" \").strip().split())",
"def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q - 1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)",
"def _sign_string(message, private_key_file=None, private_key_string=None):\r\n try:\r\n from M2Crypto import EVP\r\n except ImportError:\r\n raise NotImplementedError(\"Boto depends on the python M2Crypto \"\r\n \"library to generate signed URLs for \"\r\n \"CloudFront\")\r\n # Make sure only one of private_key_file and private_key_string is set\r\n if private_key_file and private_key_string:\r\n raise ValueError(\"Only specify the private_key_file or the private_key_string not both\")\r\n if not private_key_file and not private_key_string:\r\n raise ValueError(\"You must specify one of private_key_file or private_key_string\")\r\n # if private_key_file is a file object read the key string from there\r\n if isinstance(private_key_file, file):\r\n private_key_string = private_key_file.read()\r\n # Now load key and calculate signature\r\n if private_key_string:\r\n key = EVP.load_key_string(private_key_string)\r\n else:\r\n key = EVP.load_key(private_key_file)\r\n key.reset_context(md='sha1')\r\n key.sign_init()\r\n key.sign_update(str(message))\r\n signature = key.sign_final()\r\n return signature",
"def monthly_signature(key, message):\n byte_key = binascii.unhexlify(key)\n message = message.encode()\n return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()",
"def _get_signature(search_results: SearchResults) -> Text:\n # Was previously logic here. Leaving method in case it's needed again\n return COMMENT_SIGNATURE",
"def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):\r\n message = signing_format_message(method, headers_dict, body_dict)\r\n\r\n # hmac needs a byte string for it's starting key, can't be unicode.\r\n hashed = hmac.new(secret_key.encode('utf-8'), message, sha256)\r\n signature = binascii.b2a_base64(hashed.digest()).rstrip('\\n')\r\n authorization_header = \"SSI {}:{}\".format(access_key, signature)\r\n\r\n message += '\\n'\r\n return message, signature, authorization_header",
"def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n return util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])",
"def signature(s):\r\n # TODO: rewrite using sorted()\r\n t = list(s)\r\n f=[]\r\n i,j=0\r\n for i in range(len(s)):\r\n for j in range (len(s)-1):\r\n if i!=j:\r\n t[i],t[j]=t[j],t[i]\r\n z=''.join(t)\r\n f.append(z)\r\n return f"
] |
[
"0.6678753",
"0.6355302",
"0.63198376",
"0.63083655",
"0.61424166",
"0.60656065",
"0.6020981",
"0.59173757",
"0.5842446",
"0.5833741",
"0.5825016",
"0.5815025",
"0.5803164",
"0.57968223",
"0.5784481",
"0.5772108",
"0.5763404",
"0.57533115",
"0.57256067",
"0.57234585",
"0.5681732",
"0.567168",
"0.56186",
"0.5565712",
"0.5563815",
"0.555001",
"0.5540273",
"0.54945534",
"0.54934126",
"0.5481983"
] |
0.6566361
|
1
|
Check the permissions store for user and level
|
def check_permissions(user, actor_id, level):
permissions = get_permissions(actor_id)
for pem in permissions:
if pem['user'] == user:
if pem['level'] >= level:
return True
return False
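
check_permissions relies on a get_permissions helper that is not shown here. A minimal sketch, assuming it simply inverts what add_permission (further down in this document) writes into permissions_store; the store name, the JSON encoding and the PermissionsException come from that code, everything else is an assumption:

import json

def get_permissions(actor_id):
    # Entries are assumed to be stored as a JSON-encoded list of {'user': ..., 'level': ...} dicts.
    try:
        return json.loads(permissions_store[actor_id])
    except KeyError:
        raise PermissionsException("no permissions found for actor {}".format(actor_id))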
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def hasPerm(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"perm_name\",\"admin_username\")\n if request.auth_name!=request[\"admin_username\"]: \n request.getAuthNameObj().canDo(\"SEE ADMIN PERMISSIONS\")\n return admin_main.getLoader().getAdminByName(request[\"admin_username\"]).hasPerm(request[\"perm_name\"])",
"def permissions():\n pass",
"def perms_check(self, ctx):\r\n\t\tcommand = ctx.invoked_with\r\n\t\ttry:\r\n\t\t\tif config.cfg[\"main\"][\"perms\"][command] in [x.id for x in ctx.author.roles]:\r\n\t\t\t\treturn True\r\n\t\t\treturn False\r\n\t\texcept KeyError:\r\n\t\t\tif config.cfg[\"main\"][\"perms\"][\"global\"] in [x.id for x in ctx.author.roles]:\r\n\t\t\t\treturn True\r\n\t\t\treturn False",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def test_permissions(self):\n \n from pages.permissions import PagePermission\n admin = User.objects.get(username='admin')\n page = self.new_page()\n pp = PagePermission(user=page.author)\n self.assertTrue(pp.check('change', page=page, method='GET'))\n self.assertTrue(pp.check('change', page=page, method='POST'))\n \n staff = User.objects.get(username='staff')\n pp = PagePermission(user=staff)\n # weird because nonstaff?\n self.assertTrue(pp.check('change', page=page, method='GET',\n lang='en-us'))\n self.assertFalse(pp.check('change', page=page, method='POST',\n lang='en-us'))\n\n self.assertFalse(pp.check('delete', page=page, method='POST',\n lang='en-us'))\n self.assertFalse(pp.check('add', page=page, method='POST',\n lang='en-us'))\n self.assertFalse(pp.check('freeze', page=page, method='POST',\n lang='en-us'))\n\n self.assertFalse(pp.check('doesnotexist', page=page, method='POST',\n lang='en-us'))",
"def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator",
"async def permission_valid_check(cls):\n pass",
"def test_check_perm(self):\n #Test something that really shouldn't be there\n with pytest.raises(DbException) as err:\n ModulePerm.get_module_perm_by_id(0)\n assert str(err.value) == \"(404, 'Permission not found.')\"\n\n perm = ModulePerm.get_module_perm_by_id(self.permList[0].id)\n assert perm.id == self.permList[0].id\n assert perm.user_id == self.permList[0].user_id\n assert perm.module_id == self.permList[0].module_id\n assert perm.permissions == self.permList[0].permissions\n assert perm.permissions == 15\n\n assert perm.check_perm_read_raw()\n assert perm.check_perm_write_raw()\n assert perm.check_perm_share_raw()\n assert perm.check_perm_own_raw()\n\n perm = ModulePerm.get_module_perm_by_id(self.permList[2].id)\n assert perm.id == self.permList[2].id\n assert perm.user_id == self.permList[2].user_id\n assert perm.module_id == self.permList[2].module_id\n assert perm.permissions == self.permList[2].permissions\n assert perm.permissions == 0\n\n assert not perm.check_perm_read_raw()\n assert not perm.check_perm_write_raw()\n assert not perm.check_perm_share_raw()\n assert not perm.check_perm_own_raw()",
"def check_permissions(self, request):\n for permission in self.get_permissions():\n if not permission.has_permission(request, self):\n self.permission_denied(\n request,\n message=getattr(permission, 'message', None),\n code=getattr(permission, 'code', None)\n )",
"def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False",
"async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True",
"def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)",
"def _check_permissions(server, priv):\n # Check user permissions\n user_pass_host = server.user\n if server.passwd is not None and len(server.passwd) > 0:\n user_pass_host += \":\" + server.passwd\n user_pass_host += \"@\" + server.host\n user = User(server, user_pass_host, False)\n if not user.has_privilege(\"*\", \"*\", priv):\n raise UtilError(\"Not enough permissions. The user must have the \"\n \"%s privilege.\" % priv)",
"def permissions(self):\n return None",
"def can(self, permissions: Union[str, List]) -> bool:",
"def _check_permissions(source: Any, info: Info, kwargs: Dict[str, Any]):\n for permission_class in self.permission_classes:\n permission = permission_class()\n\n if not permission.has_permission(source, info, **kwargs):\n message = getattr(permission, \"message\", None)\n raise PermissionError(message)",
"def _common_check(self, flag):\n has_perms = self.user.is_active and self.user.is_staff and (\n self.user.has_perm('blog.change_membership') or\n self.user.has_perm('blog.change_blog'))\n return has_perms or (self.role in ['O', 'A'] and\n not self.is_left() and\n not self.is_banned() and\n (flag or self.role == 'O'))",
"def determine_perms(self, request, *args, **kwargs):\n if hasattr(request, \"user\") and request.user in Collection.objects.get(id=kwargs['pk']).curators.all():\n # The user is a curator, so they can view and edit\n return {\"can_edit\": True, \"can_view\": True}\n else:\n # The default inherited permission system\n return super().determine_perms(request, *args, **kwargs)",
"def check(self, mode, values=None):\n res_ids = {}\n if self._ids:\n self._cr.execute(\n \"\"\"SELECT DISTINCT res_type, res_id FROM\n workflow_task WHERE id = ANY (%s)\"\"\", (list(self._ids),))\n for rmod, rid in self._cr.fetchall():\n res_ids.setdefault(rmod, set()).add(rid)\n if values:\n if values.get('res_type') and values.get('res_id'):\n res_ids.setdefault(values['res_type'], set())\\\n .add(values['res_id'])\n\n for model, mids in res_ids.items():\n existing_ids = self.pool[model].exists(self._cr, self._uid, mids)\n self.check_base_security(model, existing_ids, mode)\n if not self._uid == SUPERUSER_ID and\\\n not self.env['res.users'].has_group('base.group_user'):\n raise exceptions.AccessError(\n _(\"Sorry, you are not allowed to access this document.\"))",
"def canDo(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"perm_name\",\"admin_username\",\"params\")\n if request.auth_name!=request[\"admin_username\"]: \n request.getAuthNameObj().canDo(\"SEE ADMIN PERMISSIONS\")\n args=[request[\"perm_name\"]]\n args.extend(request.fixList(\"params\"))\n try:\n apply(admin_main.getLoader().getAdminByName(request[\"admin_username\"]).canDo,args)\n return True\n except PermissionException:\n return False",
"def checkRights(self,entry):\n if not self.session.isLoggedin():\n self.logger.debug('Not logged in, we leave checkRights')\n return False\n \n # Ist Eintrag Public (z.B. Authen)\n if entry.get('public'):\n return True\n \n \n rights = entry.get('rights')\n \n if rights is None: \n self.logger.debug('Rights are net set (None), we leave checkRights')\n return True\n\n self.logger.debug('Entryrights: {}'.format(repr(rights)))\n\n found = False\n userRights = self.session.getAttribute('rights')\n self.logger.debug('Userrights: {}'.format(repr(userRights)))\n\n # wurden Rechte gesetzt\n if rights is not None or rights==[]:\n if isinstance(rights,str): rights = rights.split(',')\n \n for right in rights:\n if right.startswith('-'):\n right = right[1:]\n if right in userRights: \n self.logger.debug('Negative righths found: {} is forbidden'.format(right))\n return False\n else:\n if right in (userRights or []):\n found = True \n else:\n # Wenn keine Rechte im Eintrag\n # auf jeden Fall anzeigen\n found = True\n \n self.logger.debug('Result is \"{}\"'.format(found))\n return found",
"def test_get_permissions(self):\n pass",
"def test_filter_user_permissions(self):\n data = {\n \"users\": {\n 1: \"view\",\n 2: \"NONE\",\n }\n }\n\n with self.assertRaises(exceptions.PermissionDenied):\n check_user_permissions(data, 1)\n\n with self.assertRaises(exceptions.PermissionDenied):\n check_user_permissions(data, 2)\n\n check_user_permissions(data, 3)",
"def has_permission(self, request):\n return request.user.is_active \\\n and request.user.groups.filter(name='users').count()",
"def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))",
"def slack_access(s, level=READ):\n try: slack_access_level = settings.SLACK_USERS[s.slack_uid]\n except: return False\n return (slack_access_level & level) != 0",
"def validate_access(self, view, rights, prefix, scope_path, field):\n\n access_level = self.cleaned_data[field]\n\n if not has_access(rights, access_level, scope_path, prefix):\n self._errors[field] = ErrorList([DEF_NO_RIGHTS_FOR_ACL_MSG])\n del self.cleaned_data[field]",
"def check_user_permissions(payload: dict, user_pk: int):\n user_pks = payload.get(\"users\", {}).keys()\n if user_pk in user_pks:\n raise exceptions.PermissionDenied(\"You cannot change your own permissions\")",
"def has_permission(self, request, view):\n # el request nos dara los mismos atributos que nos da el request de las vistas genericas\n # en los customs permissions tenemos que retorna SIEMPRE un booleano (true o false) porque si es verdero procedera con el siguiente permiso o con el controlador final\n print(SAFE_METHODS)\n # SAFE_METHODS son GET, HEAD, OPTIONS\n print(request.user.personalTipo)\n if request.user.personalTipo == 1:\n return True\n else:\n return False\n # if request.method in SAFE_METHODS:\n # return True\n # else:\n # return False"
] |
[
"0.66214114",
"0.65142685",
"0.64695644",
"0.64119565",
"0.64119565",
"0.6395322",
"0.63662887",
"0.63351125",
"0.6318051",
"0.631077",
"0.63089794",
"0.6278865",
"0.62344545",
"0.6213699",
"0.62013096",
"0.61916727",
"0.6188883",
"0.61668557",
"0.6153456",
"0.6152701",
"0.614577",
"0.61426175",
"0.61400056",
"0.61390036",
"0.61221653",
"0.6065684",
"0.60429275",
"0.60426384",
"0.6013575",
"0.5988718"
] |
0.65215534
|
1
|
Add a permission for a user and level to an actor.
|
def add_permission(user, actor_id, level):
try:
permissions = get_permissions(actor_id)
except PermissionsException:
permissions = []
permissions.append({'user': user,
'level': level})
permissions_store[actor_id] = json.dumps(permissions)
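
A short, hypothetical round trip through add_permission and check_permissions above (the actor id, user names and levels are made up):

add_permission('alice', 'abc123', 2)
check_permissions('alice', 'abc123', 1)   # True: stored level 2 >= requested 1
check_permissions('alice', 'abc123', 3)   # False: stored level 2 < requested 3
check_permissions('bob', 'abc123', 1)     # False: no entry for this user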
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def permissions_add(\n self,\n ctx,\n type_: str.lower,\n name: str,\n *,\n user_or_role: Union[Role, utils.User, str],\n ):\n\n if type_ not in {\"command\", \"level\"}:\n return await ctx.send_help(ctx.command)\n\n command = level = None\n if type_ == \"command\":\n name = name.lower()\n command = self.bot.get_command(name)\n check = command is not None\n else:\n level = self._parse_level(name)\n check = level is not PermissionLevel.INVALID\n\n if not check:\n embed = Embed(\n title=\"Error\",\n color=Color.red(),\n description=f\"The referenced {type_} does not exist: `{name}`.\",\n )\n return await ctx.send(embed=embed)\n\n value = self._verify_user_or_role(user_or_role)\n if type_ == \"command\":\n name = command.qualified_name\n await self.bot.update_perms(name, value)\n else:\n await self.bot.update_perms(level, value)\n name = level.name\n if level > PermissionLevel.REGULAR:\n if value == -1:\n key = self.bot.modmail_guild.default_role\n elif isinstance(user_or_role, Role):\n key = user_or_role\n else:\n key = self.bot.modmail_guild.get_member(value)\n if key is not None:\n logger.info(\"Granting %s access to Modmail category.\", key.name)\n await self.bot.main_category.set_permissions(\n key, read_messages=True\n )\n\n embed = Embed(\n title=\"Success\",\n color=self.bot.main_color,\n description=f\"Permission for `{name}` was successfully updated.\",\n )\n return await ctx.send(embed=embed)",
"def addUserPermission(self, name, _type):\n self._client.addUserPermission(name, _type)",
"def post(self, actor_id):\n try:\n Actor.from_db(actors_store[actor_id])\n except KeyError:\n raise APIException(\n \"actor not found: {}'\".format(actor_id), 404)\n args = self.validate_post()\n add_permission(args['user'], actor_id, args['level'])\n permissions = get_permissions(actor_id)\n return ok(result=permissions, msg=\"Permission added successfully.\")",
"def addPermission(self, permission=None, permName=None, kvDict=None):\n return _modelActionBase(self, instance=permission, instanceName=permName, kvDict=kvDict,\n model=get_model('perm'), db=db, action='add', modelType='permission')",
"def addPermission(self, permission=None, permName=None, kvDict=None):\n return _modelActionBase(self, instance=permission, instanceName=permName, kvDict=kvDict,\n model=get_model('perm'), db=db, action='add', modelType='permission')",
"def permit_user(self, perm_name, user):\n try:\n perm_set = self.permissions[perm_name]\n except KeyError:\n raise PermissionError(\"Permission does not Exists\")\n else:\n if user.username not in self.authenticator.users:\n raise UsernameNotFoundError\n perm_set.add(user.username)\n if 'add' and 'property' in perm_name:\n user.can_add_property = True",
"def add_user_grant(self, permission, user_id):\r\n acl = self.get_acl()\r\n acl.add_user_grant(permission, user_id)\r\n self.set_acl(acl)",
"def addPermission(self, permission, auth_name, is_group, extra_params):\n\n with DBSession(self.__config_db) as session:\n perm, params = ThriftAuthHandler.__create_permission_args(\n permission, extra_params, session)\n\n if not require_manager(perm, params, self.__auth_session):\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"You can not manage the permission '{0}'\"\n .format(perm.name))\n\n handler = make_handler(perm, params)\n handler.add_permission(auth_name.strip(),\n is_group,\n user_name=self.getLoggedInUser())\n\n session.commit()\n return True",
"def add_user_grant(self, permission, user_id):\n acl = self.get_acl()\n acl.add_user_grant(permission, user_id)\n self.set_acl(acl)",
"def add_permission(self, perm):\n if not self.has_permission(perm):\n self.permissions += perm",
"def add_permission(self, label, aws_account_id, action_name):\r\n return self.connection.add_permission(self, label, aws_account_id, action_name)",
"def add_permission(self, permission: str):\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])",
"def add_permission(self, identity_id, permission):\n # type: (str, str) -> Union[bool, Permission]\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n return self.connection.api_call(\n \"PUT\",\n [\"permissions\", self.id, identity_id, permission],\n model=Permission,\n headers=headers,\n )",
"def add_permission(self, permission):\n self._permissions.add(permission)",
"def _change_access(course, user, level, action):\r\n\r\n try:\r\n role = ROLES[level](course.id)\r\n except KeyError:\r\n raise ValueError(\"unrecognized level '{}'\".format(level))\r\n\r\n if action == 'allow':\r\n role.add_users(user)\r\n elif action == 'revoke':\r\n role.remove_users(user)\r\n else:\r\n raise ValueError(\"unrecognized action '{}'\".format(action))",
"def addRolePermission(self, role, _type):\n self._client.addRolePermission(role, _type)",
"def grant_permission(self, extra_permission=None):\n permission_list = self.get_permission()\n if extra_permission is not None:\n permission_list += (\n extra_permission\n if isinstance(extra_permission, (tuple, list))\n else [extra_permission]\n )\n for perm in self.get_permission():\n obj = self.get_permission_object()\n assign_perm(perm, self.user, obj)",
"def add(self, *values):\n\t\tself.permissions.extend(values)\n\t\treturn self",
"def check_permissions(user, actor_id, level):\n permissions = get_permissions(actor_id)\n for pem in permissions:\n if pem['user'] == user:\n if pem['level'] >= level:\n return True\n return False",
"def addFlowPermission(self, sendToSwitch, flowPermissionBin):\n self.permissionCt += 1\n if (self.permissionCt % 1000) == 0:\n print (\"%s permissions added (in OFX controller component)\"%self.permissionCt)\n\n data = flowPermissionBin\n msg = self.ofxSys.buildModuleMessage(self.MODULEID, ADDFLOWPERMISSION, data)\n sendToSwitch(msg)",
"def add_permission(self, topic, label, account_ids, actions):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic,\r\n 'Label' : label}\r\n self.build_list_params(params, account_ids, 'AWSAccountId')\r\n self.build_list_params(params, actions, 'ActionName')\r\n response = self.make_request('AddPermission', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def add_permission(cls, perm, user):\n\n # Ensure we do not create duplicate users\n try:\n user_perm = cls.objects.get(user_id=user.id)\n new_user = False\n except cls.DoesNotExist:\n user_perm = cls(user_id=user.id, permission_list=str(perm))\n new_user = True\n\n if new_user:\n user_perm.save(force_insert=True)\n else:\n # Make sure the usr does not already have the given permission\n existing_perms = cls.get_permissions(user)\n if perm not in existing_perms:\n existing_perms.append(perm)\n\n user_perm.permission_list = \",\".join(existing_perms)\n user_perm.save(force_update=True)",
"def add_ability(self, ability):\n self.abilities.append(ability)",
"def add_permission(self, queue, label, aws_account_id, action_name):\r\n params = {'Label': label,\r\n 'AWSAccountId' : aws_account_id,\r\n 'ActionName' : action_name}\r\n return self.get_status('AddPermission', params, queue.id)",
"def addFlowPermission(self, msgContent):\n self.permissionCt += 1\n if (self.permissionCt % 1000) == 0:\n compTime = time.time() - self.time\n self.time = time.time()\n print (\"%s permissions added (in OFX manager) (%s sec)\"%(self.permissionCt, compTime))\n self.ofxAgent.sendToDp(self.MODULEID, ADDFLOWPERMISSION, msgContent)",
"def add_permission_to_role(self, role: Role, permission: Permission | None) -> None:\n if permission and permission not in role.permissions:\n try:\n role.permissions.append(permission)\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_ADD_PERMROLE.format(permission, role.name))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_ADD_PERMROLE.format(e))\n self.get_session.rollback()",
"def set_permission(StackId=None, IamUserArn=None, AllowSsh=None, AllowSudo=None, Level=None):\n pass",
"def addlevel(self, userid, amount):\r\n players[userid].addLevel(amount)",
"def _add(self, name, permissions):\n data = {\"name\": name, \"permissions\": permissions}\n path = self.router.roles\n return self.request(method=\"put\", path=path, json=data)",
"def add(self, capability):\n capability = ircutils.toLower(capability)\n assert capability != '-owner', '\"-owner\" disallowed.'\n self.__parent.add(capability)"
] |
[
"0.7045652",
"0.65704656",
"0.64027405",
"0.62560606",
"0.62560606",
"0.6157055",
"0.6120105",
"0.61065036",
"0.6088077",
"0.6032174",
"0.59692377",
"0.596524",
"0.594055",
"0.5916958",
"0.5858972",
"0.5845438",
"0.56916314",
"0.56907296",
"0.5688241",
"0.5676378",
"0.5671096",
"0.5669694",
"0.5664888",
"0.56296074",
"0.5621192",
"0.5616916",
"0.56042004",
"0.56031",
"0.5568679",
"0.55660486"
] |
0.79164106
|
0
|
Add new permissions for an actor
|
def post(self, actor_id):
try:
Actor.from_db(actors_store[actor_id])
except KeyError:
raise APIException(
"actor not found: {}'".format(actor_id), 404)
args = self.validate_post()
add_permission(args['user'], actor_id, args['level'])
permissions = get_permissions(actor_id)
return ok(result=permissions, msg="Permission added successfully.")
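
Assuming this handler is mounted as a REST resource, a client call might look like the sketch below; the URL pattern and the JSON payload format are assumptions, only the 'user' and 'level' fields follow validate_post and add_permission above:

import requests

resp = requests.post(
    "http://localhost:5000/actors/abc123/permissions",   # hypothetical route
    json={"user": "alice", "level": 1},
)
print(resp.json())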
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_permission(user, actor_id, level):\n try:\n permissions = get_permissions(actor_id)\n except PermissionsException:\n permissions = []\n permissions.append({'user': user,\n 'level': level})\n permissions_store[actor_id] = json.dumps(permissions)",
"def _add(self, name, permissions):\n data = {\"name\": name, \"permissions\": permissions}\n path = self.router.roles\n return self.request(method=\"put\", path=path, json=data)",
"async def permissions_add(\n self,\n ctx,\n type_: str.lower,\n name: str,\n *,\n user_or_role: Union[Role, utils.User, str],\n ):\n\n if type_ not in {\"command\", \"level\"}:\n return await ctx.send_help(ctx.command)\n\n command = level = None\n if type_ == \"command\":\n name = name.lower()\n command = self.bot.get_command(name)\n check = command is not None\n else:\n level = self._parse_level(name)\n check = level is not PermissionLevel.INVALID\n\n if not check:\n embed = Embed(\n title=\"Error\",\n color=Color.red(),\n description=f\"The referenced {type_} does not exist: `{name}`.\",\n )\n return await ctx.send(embed=embed)\n\n value = self._verify_user_or_role(user_or_role)\n if type_ == \"command\":\n name = command.qualified_name\n await self.bot.update_perms(name, value)\n else:\n await self.bot.update_perms(level, value)\n name = level.name\n if level > PermissionLevel.REGULAR:\n if value == -1:\n key = self.bot.modmail_guild.default_role\n elif isinstance(user_or_role, Role):\n key = user_or_role\n else:\n key = self.bot.modmail_guild.get_member(value)\n if key is not None:\n logger.info(\"Granting %s access to Modmail category.\", key.name)\n await self.bot.main_category.set_permissions(\n key, read_messages=True\n )\n\n embed = Embed(\n title=\"Success\",\n color=self.bot.main_color,\n description=f\"Permission for `{name}` was successfully updated.\",\n )\n return await ctx.send(embed=embed)",
"def create_permission(permission, event):\n setDefaultRoles(permission.title, ('Manager',))",
"def add_permission(self, topic, label, account_ids, actions):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic,\r\n 'Label' : label}\r\n self.build_list_params(params, account_ids, 'AWSAccountId')\r\n self.build_list_params(params, actions, 'ActionName')\r\n response = self.make_request('AddPermission', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def add(self, *values):\n\t\tself.permissions.extend(values)\n\t\treturn self",
"def allow(self, role, *permissions):\n\n if not isinstance(role, string_types):\n role = role.id\n\n rec = self.get(Allow, role)\n if rec is None:\n rec = [Allow, role, set()]\n self.append(rec)\n\n if rec[2] is ALL_PERMISSIONS:\n return\n\n if ALL_PERMISSIONS in permissions:\n rec[2] = ALL_PERMISSIONS\n else:\n rec[2].update(permissions)",
"def add_permission(self, label, aws_account_id, action_name):\r\n return self.connection.add_permission(self, label, aws_account_id, action_name)",
"def add_permissions(self, permissions: List[str]):\n for permission in permissions:\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])",
"def add(self, capability):\n capability = ircutils.toLower(capability)\n assert capability != '-owner', '\"-owner\" disallowed.'\n self.__parent.add(capability)",
"def changePermissions(self, event):\n pass",
"def add_permission(self, perm):\n if not self.has_permission(perm):\n self.permissions += perm",
"def add_permission(self, permission: str):\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])",
"def assign_permissions(sender, instance, created, **kwargs):\n if created:\n assign_perm('view_strand', instance.owner.group, instance)\n assign_perm('change_strand', instance.saver, instance)\n assign_perm('delete_strand', instance.saver, instance)\n assign_perm('view_strand', instance.saver, instance)",
"def addUserPermission(self, name, _type):\n self._client.addUserPermission(name, _type)",
"def addFlowPermission(self, msgContent):\n self.permissionCt += 1\n if (self.permissionCt % 1000) == 0:\n compTime = time.time() - self.time\n self.time = time.time()\n print (\"%s permissions added (in OFX manager) (%s sec)\"%(self.permissionCt, compTime))\n self.ofxAgent.sendToDp(self.MODULEID, ADDFLOWPERMISSION, msgContent)",
"async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)",
"def add_view_permissions(sender, **kwargs):\n from django.contrib.auth.models import Permission\n from django.contrib.contenttypes.models import ContentType\n\n for content_type in ContentType.objects.filter(app_label=sender.label):\n codename = \"view_%s\" % content_type.model\n\n perm, created = Permission.objects.get_or_create(\n content_type=content_type, codename=codename, defaults={\n 'name': 'Can view %s' % content_type.name,\n }\n )\n\n if created:\n sys.stdout.write(\n 'Added view permission for %s' % content_type.name +\n '\\n'\n )",
"def permit_user(self, perm_name, user):\n try:\n perm_set = self.permissions[perm_name]\n except KeyError:\n raise PermissionError(\"Permission does not Exists\")\n else:\n if user.username not in self.authenticator.users:\n raise UsernameNotFoundError\n perm_set.add(user.username)\n if 'add' and 'property' in perm_name:\n user.can_add_property = True",
"def addPermission(self, permission=None, permName=None, kvDict=None):\n return _modelActionBase(self, instance=permission, instanceName=permName, kvDict=kvDict,\n model=get_model('perm'), db=db, action='add', modelType='permission')",
"def addPermission(self, permission=None, permName=None, kvDict=None):\n return _modelActionBase(self, instance=permission, instanceName=permName, kvDict=kvDict,\n model=get_model('perm'), db=db, action='add', modelType='permission')",
"def add_permissions(apps, schema_editor):\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n Group = apps.get_model(\"auth\", \"Group\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n permission, created = Permission.objects.get_or_create(\n codename=\"can_approve_estimated_completion_date\",\n defaults={\n \"name\": \"Can approve estimated completion date\",\n \"content_type\": ContentType.objects.get_for_model(\n apps.get_model(\"barriers\", \"Barrier\")\n ),\n },\n )\n\n admin_group = Group.objects.get(name=\"Administrator\")\n admin_group.permissions.add(permission)\n\n print(\n 'Permission \"can_approve_estimated_completion_date\" added to the \"Admin\" group.'\n )",
"def addAdminUserActivity(context, request):\n rest_params = {'actor': request.actor,\n 'verb': 'post'}\n\n # Initialize a Activity object from the request\n newactivity = Activity()\n newactivity.fromRequest(request, rest_params=rest_params)\n\n # Search if there's any activity from the same user with\n # the same actor in the last minute\n mmdb = MADMaxDB(context.db)\n query = {\n 'actor.username': request.actor.username,\n 'object.content': newactivity['object']['content'],\n 'published': {'$gt': newactivity.published - timedelta(minutes=1)}\n }\n duplicated = mmdb.activity.search(query)\n\n if duplicated:\n code = 200\n newactivity = duplicated[0]\n else:\n # New User\n code = 201\n activity_oid = newactivity.insert()\n newactivity['_id'] = activity_oid\n\n handler = JSONResourceEntity(newactivity.flatten(squash=['keywords']), status_code=code)\n return handler.buildResponse()",
"def create_custom_permissions(self) -> None:\n self.add_permission_view_menu(\"all_datasource_access\", \"all_datasource_access\")\n self.add_permission_view_menu(\"all_database_access\", \"all_database_access\")\n self.add_permission_view_menu(\"all_query_access\", \"all_query_access\")\n self.add_permission_view_menu(\"can_share_dashboard\", \"Superset\")\n self.add_permission_view_menu(\"can_share_chart\", \"Superset\")",
"def permissions():\n pass",
"def add_view_permissions(sender, **kwargs):\n # for each of our content types\n for content_type in ContentType.objects.all():\n # build our permission slug\n codename = \"view_%s\" % content_type.model\n\n # if it doesn't exist..\n if not Permission.objects.filter(content_type=content_type, codename=codename):\n # add it\n Permission.objects.create(content_type=content_type,\n codename=codename,\n name=\"Can view %s\" % content_type.name)\n # print \"Added view permission for %s\" % content_type.name",
"def add_permission(self, queue, label, aws_account_id, action_name):\r\n params = {'Label': label,\r\n 'AWSAccountId' : aws_account_id,\r\n 'ActionName' : action_name}\r\n return self.get_status('AddPermission', params, queue.id)",
"def add_ability(self, ability):\n self.abilities.append(ability)",
"async def permissions(self, ctx):\r\n perms = [p.replace(\"_\", \" \") for p in PERMS]\r\n embed = discord.Embed(title=\"Permissions that can be passed to Targeter\")\r\n embed.description = humanize_list(perms)\r\n await ctx.send(embed=embed)",
"def addAdminContextActivity(context, request):\n rest_params = {'actor': request.actor,\n 'verb': 'post'}\n\n # Initialize a Activity object from the request\n newactivity = Activity()\n newactivity.fromRequest(request, rest_params=rest_params)\n\n # Search if there's any activity from the same user with\n # the same actor in the last minute\n mmdb = MADMaxDB(context.db)\n query = {\n 'actor.url': request.actor.url,\n 'object.content': newactivity['object']['content'],\n 'published': {'$gt': newactivity.published - timedelta(minutes=1)}\n }\n duplicated = mmdb.activity.search(query)\n\n if duplicated:\n code = 200\n newactivity = duplicated[0]\n else:\n code = 201\n activity_oid = newactivity.insert()\n newactivity['_id'] = activity_oid\n\n handler = JSONResourceEntity(newactivity.flatten(), status_code=code)\n return handler.buildResponse()"
] |
[
"0.7235166",
"0.69934314",
"0.67583215",
"0.6371254",
"0.6335169",
"0.62054914",
"0.61273474",
"0.6100005",
"0.6073607",
"0.60484934",
"0.60419154",
"0.6008811",
"0.599937",
"0.5988471",
"0.5971591",
"0.59599745",
"0.59585696",
"0.5911363",
"0.5890705",
"0.5861503",
"0.5861503",
"0.58429563",
"0.5841469",
"0.5793098",
"0.5786051",
"0.5782285",
"0.57609147",
"0.5758732",
"0.573574",
"0.57291913"
] |
0.70016277
|
1
|
Constitutive equation for Nabarro-Herring creep.
|
def nabarro_herring_creep(
temperature: np.ndarray, n_shear_stress: np.ndarray) -> np.ndarray:
numerator = (
A_NH * MU * V * D_L * n_shear_stress *
np.exp(-H_L / (R * temperature))
)
    denominator = R * D ** 2 * temperature
    strain_rate = numerator/denominator
return strain_rate
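
Written out, the rate computed above is the expression below; the constant names come from the code, while their physical meanings (A_NH prefactor, MU shear modulus, V atomic volume, D_L lattice diffusivity, H_L activation enthalpy, R gas constant, D grain size) are the usual Nabarro-Herring reading and are an assumption here:

\dot{\varepsilon}_{\mathrm{NH}} = \frac{A_{\mathrm{NH}} \, \mu \, V \, D_L \, \sigma \, e^{-H_L / (R T)}}{R \, D^{2} \, T}

with \sigma = n_shear_stress and T = temperature. The D^2 grain-size dependence is what separates this lattice-diffusion mechanism from Coble creep further down, which carries D^3.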
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def coble_creep(\n temperature: np.ndarray, n_shear_stress: np.ndarray) -> np.ndarray:\n numerator = (\n A_C * MU * V * D_G * W * n_shear_stress *\n np.exp(-H_G / (R * temperature))\n )\n denomenator = R * D ** 3 * temperature\n \n strain_rate = numerator/denomenator\n\n return strain_rate",
"def cost_hN(self, node, xg):\n hN = (node.state - xg).T @ self.QN @ (node.state - xg)\n return float(hN)",
"def circuitSat(C):",
"def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6",
"def cost_h(self, node, xg):\n h = (node.state - xg).T @ self.Q @ (node.state - xg)\n return float(h)",
"def objective(rp,n=5000,C=-2*10**11,a=300,b=1):\n l = log(rp)/n\n r = exp(l)\n rm1 = r-1\n return (rp-1)*((a-b*n)*rm1 + 1) - C*(rm1)*(rm1)\n #return rm1",
"def price_heston_mc(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_):\r\n esp_ = monte_carlo(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_)\r\n return exp(-r_*T_)*esp_",
"def dislocation_creep(\n temperature: np.ndarray, n_shear_stress: np.ndarray) -> np.ndarray:\n numerator = (\n MU * B * D_L * np.power(n_shear_stress, 3) * \n np.exp(-H_L / (R * temperature))\n )\n\n denomenator = K * temperature\n \n strain_rate = numerator/denomenator\n\n # Note, must use np.exp and np.power instead of math module variants\n # to apply across an ndarray\n\n return strain_rate",
"def king(r, n0, rc, b):\n return n0 * (1. + (r/rc)**2)**b",
"def coefficient(self) -> float:\n ...",
"def planck_f(nu, T):\n return ((2*h*nu**3)/(c**2))*(1./(np.exp((h*nu)/(k*T))-1))",
"def loevinger_coeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif cov == 0.0:\n return 0.0\n else:\n return _div(cov, min(p1 * q2, p2 * q1))",
"def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc",
"def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh",
"def self_diffusion_coefficient(self, n, T):\n Tstar = T / self.epsilon_Kelvin\n omd = self._OmegaDiffusion(Tstar)\n numerator = 3 * (4 * pi * kB * T / self.mass)**(1/2)\n denominator = 16 * pi * n * self.sigma ** 2 * omd\n return numerator/denominator",
"def graphite_cracking_rate_Ai2020(T_dim):\n k_cr = 3.9e-20\n Eac_cr = 0 # to be implemented\n arrhenius = np.exp(Eac_cr / pybamm.constants.R * (1 / T_dim - 1 / 298.15))\n return k_cr * arrhenius",
"def rho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= concave_piece(x,k_ind,m_ind) \n\n return f",
"def cost(self):\n\t\treturn self.g + self.h",
"def king2(r, n0, rc0, b0, n1, rc1, b1):\n return n0 * (1. + (r/rc0)**2)**b0 + n1 * (1. + (r/rc1)**2)**b1",
"def unmitigated_cost(self, rho: numpy.ndarray) -> float:\n return numpy.trace(self.cost * rho).real",
"def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn",
"def sokal_sneath_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + 2 * (b + c))",
"def C_Na_eq():\n global C_Na, C_Mg, C_dNTP\n return C_Na + 120*sqrt(C_Mg - C_dNTP)",
"def curve_no_hillcoef(ph, pka):\n# return ph - pka\n return 1/(10**(pka-ph)+1)",
"def cacl_echelon_holding_cost(node):\n predecessors = node.predecessors\n pred_holding_cost = sum([x.holding_cost for x in predecessors])\n return node.holding_cost - pred_holding_cost",
"def acosh(self):\r\n getcontext().prec += 2\r\n arg = self + (self*self - 1).sqrt1()\r\n ans = arg.ln()\r\n getcontext().prec -= 2\r\n return +ans",
"def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed",
"def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh",
"def clebsch_gordan((J1,M1),(J2,M2),(J3,M3)):\n cg=(-1)**(J2-J1-M3)*math.sqrt(2*J3+1)*pygsl.sf.coupling_3j(int(2*J1), int(2*J2), int(2*J3), int(2*M1), int(2*M2),int(-2*M3))[0]\n #\n return cg",
"def solve():\n # the amount of lattice paths from (0, 0) to (n, k) is (n+k) over n (according to Wikipedia)\n return binomial_coefficient(20 + 20, 20)"
] |
[
"0.66120404",
"0.6235676",
"0.62089044",
"0.6159591",
"0.60804176",
"0.6045265",
"0.602658",
"0.6014804",
"0.6000879",
"0.59716594",
"0.5961475",
"0.59098405",
"0.58900356",
"0.5887956",
"0.5883903",
"0.58825594",
"0.586504",
"0.57670516",
"0.57556",
"0.5742544",
"0.5737319",
"0.57207245",
"0.5716387",
"0.5706547",
"0.5687265",
"0.56827873",
"0.5668652",
"0.56268126",
"0.56185925",
"0.5611223"
] |
0.67318976
|
0
|
Constitutive equation for Coble creep.
|
def coble_creep(
temperature: np.ndarray, n_shear_stress: np.ndarray) -> np.ndarray:
numerator = (
A_C * MU * V * D_G * W * n_shear_stress *
np.exp(-H_G / (R * temperature))
)
    denominator = R * D ** 3 * temperature
    strain_rate = numerator/denominator
return strain_rate
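
As with the Nabarro-Herring function above, the implemented rate written out; constant names are from the code, and reading D_G as grain-boundary diffusivity and W as boundary width is an assumption:

\dot{\varepsilon}_{\mathrm{C}} = \frac{A_C \, \mu \, V \, D_G \, W \, \sigma \, e^{-H_G / (R T)}}{R \, D^{3} \, T}

with \sigma = n_shear_stress and T = temperature; the D^3 dependence reflects grain-boundary rather than lattice diffusion.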
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func",
"def coefficient(self) -> float:\n ...",
"def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6",
"def cole_coeff(self):\n return self.diseq_coeff(standardize=True)",
"def circuitSat(C):",
"def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc",
"def cole(Te, nev):\n e4e2me2=eV2J**(2.5)/epsilon**(2.0)/np.sqrt(me)/4.0/np.pi/2.0**(1.5)\n return e4e2me2*nev*lnlambda(Te,nev)/Te**(1.5)",
"def test_scalar_coeff_cc():\n hs_1 = LocalSpace('q1', basis=('g', 'e'))\n hs_2 = LocalSpace('q2', basis=('g', 'e'))\n kappa = Symbol('kappa', real=True)\n a1 = Destroy(hs=hs_1)\n a2 = Destroy(hs=hs_2)\n\n jc_expr = (\n I / 2 * (2 * kappa * (a1.dag() * a2) - 2 * kappa * (a1 * a2.dag()))\n )\n\n simplified = rewrite_with_operator_pm_cc(jc_expr)\n assert simplified == I * kappa * OperatorPlusMinusCC(\n a1.dag() * a2, sign=-1\n )\n expanded = simplified.doit()\n assert expanded == I * kappa * (a1.dag() * a2 - a1 * a2.dag())",
"def loevinger_coeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif cov == 0.0:\n return 0.0\n else:\n return _div(cov, min(p1 * q2, p2 * q1))",
"def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C",
"def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh",
"def clebsch_gordan((J1,M1),(J2,M2),(J3,M3)):\n cg=(-1)**(J2-J1-M3)*math.sqrt(2*J3+1)*pygsl.sf.coupling_3j(int(2*J1), int(2*J2), int(2*J3), int(2*M1), int(2*M2),int(-2*M3))[0]\n #\n return cg",
"def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c",
"def _excitonic_coft_old(self,SS,AG,n):\n \n # FIXME: works only for 2 level molecules\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n # get number of monomeric basis states\n Na = 0\n for monomer in AG.monomers:\n Na += monomer.nel-1\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n #Na = AG.nmono\n for kk in range(Na):\n \n #nkk = AG.monomers[kk].egcf_mapping[0]\n \n for ll in range(Na):\n \n #nll = AG.monomers[ll].egcf_mapping[0]\n \n ct += ((SS[kk+1,n+1]**2)*(SS[ll+1,n+1]**2)*cfm.get_coft(kk,ll))\n #*AG.egcf_matrix.get_coft(nkk,nll))\n \n return ct",
"def jaccard_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + b + c)",
"def con_cieq(x,project):\n \n cons = project.con_cieq(x)\n \n if cons: cons = array(cons)\n else: cons = zeros([0])\n \n return -cons",
"def cg(self):\n A = self.clAlphaT / self.clAlphaWF\n B = (1 - self.downwashGradW)\n C = (self.surfaceT * self.tlH) / (self.surfaceW * self.cMACW)\n sm = 0.05 * self.cMACW # static margin, approximated as 5% of aircraft MAC\n return self.ac + A * B * C * self.speedRatio**2 - sm",
"def mcc(self):\n tp = self.tp\n tn = self.tn\n fp = self.fp\n fn = self.fn\n return tp * tn / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))",
"def DPc(R,Pc):\n return r2*(K2**B2/(K2**B2 + (A)**B2))*(S/(S + R*Pc + Pc)) \\\n *(R*M)/(K3 + R*M)*Pc - gc*Pc",
"def compute_f_ColeBrook(self, R, e, D):\n # assume a starting correct value for the \"f\" on the right hand side (RHS)\n # uses friction factor from Barr's equation as the starting value.\n f_initial = self.compute_f_BARR(R, self.e, self.D)\n\n relative_roughness = e / D\n a = relative_roughness / 3.71\n b = 2.51 / (R * sqrt(f_initial))\n\n # Compute the f on the LHS ------ (1) \n f_final = 1 / (-2 * log((a + b), 10))**2\n \n # Make sure friction factor is correct to at least 6 decimal place.\n tolerance = 0.0000001\n \n # if the f on the LHS is not within tolerance limit,\n # replace it on the RHS and recompute the f on the LHS till it's within\n # tolerance limit.\n while abs(f_final - f_initial) >= tolerance:\n f_initial = f_final\n b = 2.51 / (R * sqrt(f_initial))\n f_final = 1 / (-2 * log((a + b), 10))**2\n return f_final",
"def _get_concentration(self, state):\n return self.fc(state.float_features).exp() + self.EPSILON",
"def calc_vcirc(r,menc,G=1.):\n if G is None: G = 1.\n return np.sqrt(G*menc/r)",
"def discharge_coefficient(self) -> _VectorisedFloat:\n window_ratio = np.array(self.window_width / self.window_height)\n coefs = np.empty(window_ratio.shape + (2, ), dtype=np.float64)\n\n coefs[window_ratio < 0.5] = (0.06, 0.612)\n coefs[np.bitwise_and(0.5 <= window_ratio, window_ratio < 1)] = (0.048, 0.589)\n coefs[np.bitwise_and(1 <= window_ratio, window_ratio < 2)] = (0.04, 0.563)\n coefs[window_ratio >= 2] = (0.038, 0.548)\n M, cd_max = coefs.T\n\n window_angle = 2.*np.rad2deg(np.arcsin(self.opening_length/(2.*self.window_height)))\n return cd_max*(1-np.exp(-M*window_angle))",
"def JC(lcsc1c2, Pc1, Pc2):\n\n JC = 1/(2*lcsc1c2 - (Pc1 + Pc2))\n return JC",
"def c(x):\n cost = per_widget_cost * x + fixed_cost\n return cost",
"def wce(B):\n return eme*B",
"def calc_cophenetic_coeff(self):\n c, d = cophenet(self.__linkage, self.__distance_matrix)\n return round(c, 3)",
"def covar(self):\n a, c, d, b = self.to_ccw()\n return a * d - b * c",
"def conToCelc(faren):\n return (faren - 32)*(5/9)",
"def calc_chromatic_coupling(self):\n raise NotImplementedError('Chromatic Coupling is not Implemented yet.')"
] |
[
"0.64477867",
"0.6385757",
"0.63402474",
"0.6321891",
"0.6290459",
"0.6224188",
"0.6208865",
"0.61290115",
"0.6116374",
"0.6108744",
"0.61077815",
"0.609768",
"0.6049114",
"0.60460794",
"0.6044214",
"0.5993522",
"0.599171",
"0.59859544",
"0.5965093",
"0.5962999",
"0.5953919",
"0.59493035",
"0.5939659",
"0.59274584",
"0.59212005",
"0.59195495",
"0.59150565",
"0.5913618",
"0.5901819",
"0.5894585"
] |
0.6640319
|
0
|
Finds the element-wise maximum of three np.ndarrays in a list.
|
def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:
temp = np.maximum(array_list[0], array_list[1])
all_maxs = np.maximum(temp, array_list[2])
return all_maxs
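
A minimal usage sketch for the function above:

import numpy as np

arrays = [np.array([1, 5, 2]), np.array([4, 0, 3]), np.array([2, 6, 1])]
print(three_array_max(arrays))   # -> [4 6 3]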
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_max(list):\n return find_value_at(list, 0)",
"def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)",
"def compare_max(values, weights):\n return np.max(values.numpy())",
"def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])",
"def r_max(nxs):\n largest = None\n for i,e in enumerate(nxs):\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if i == 0 or val > largest:\n largest = val\n\n return largest",
"def find_max(data):\n index = 0\n res = data[index]\n for i in range(1, len(data)):\n if data[i] > res:\n res = float(data[i])\n index = i\n else:\n break\n return res, index",
"def arglexmax(keys, multi=False):\n # Handle keys in reverse order to be consistent with np.lexsort\n reverse_keys = keys[::-1]\n arr = reverse_keys[0]\n breakers = reverse_keys[1:]\n # Look for the maximum value in the first array, and continue using new\n # arrays until a unique maximum index is found.\n _cand_idxs = np.where(arr == arr.max())[0]\n if len(_cand_idxs) > 1:\n for breaker in breakers:\n vals = breaker[_cand_idxs]\n _cand_idxs = _cand_idxs[vals == vals.max()]\n if len(_cand_idxs) == 1:\n break\n # If multiple maximum values are found then either\n # return them all or return an arbitrary one.\n return _cand_idxs if multi else _cand_idxs[0]",
"def d_max(x, y):\n axis = np.argmax(x.shape)\n return np.max(np.array([x, y]), axis=axis)",
"def pmax(\n *x: Iterable,\n na_rm: bool = False\n) -> Iterable[float]:\n maxlen = max(map(length_of, x))\n x = (recycle_value(elem, maxlen) for elem in x)\n return Array([max(elem, na_rm=na_rm) for elem in zip(*x)])",
"def r_max(nxs):\n largest = None\n first_time = True\n for e in nxs:\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if first_time or val > largest:\n largest = val\n first_time = False\n\n return largest",
"def get_max(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))",
"def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)",
"def maximum(some_list):\n return max(some_list)",
"def sim_max(sim_mats):\n return np.array(sim_mats).max(axis=0)",
"def get_max_index_of_list(a_list):\n if isinstance(a_list, np.ndarray):\n idx = np.argmax(a_list)\n elif isinstance(a_list, list):\n idx=a_list.index(max(a_list))\n return idx",
"def get_max(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return max(data)",
"def GetMax(val, maximum):\n\tval = float(val)\n\tmaximum = float(maximum)\n\treturn max([val, maximum])",
"def interpolate_max(arr, heights=(0., 1., 2.)):\n if len(arr) != 3:\n return None\n y1, y2, y3 = arr\n x1, x2, x3 = heights\n\n x1 = float(x1)\n x2 = float(x2)\n x3 = float(x3)\n\n num = -(y1*(x2 - x3)*(-x2 - x3)\n + y2*(x1 - x3)*(x1 + x3)\n + y3*(x1 - x2)*(-x1 - x2))\n den = 2. * (y1*(x2 - x3)\n - y2*(x1 - x3)\n + y3*(x2 - x3))\n\n non_zero_den = np.array(den != 0, dtype=bool)\n zero_den = np.array(den == 0, dtype=bool)\n # print zero_den\n max_heights = np.zeros(num.shape, dtype=float)\n old_err_state = np.seterr(divide='raise')\n ignore_states = np.seterr(**old_err_state)\n max_heights = np.copy(num)\n max_heights[non_zero_den] = max_heights[non_zero_den]/den[non_zero_den]\n max_heights[zero_den] = 0\n\n # print np.isnan(max_heights).sum()\n # The maximum of the interpolation may lie outside the given height\n # values. If so, ouput the highest value from the data.\n i = np.logical_or(\n max_heights > max(heights), max_heights < min(heights))\n max_heights[i] = np.argmax(arr, axis=0)[i]\n return max_heights",
"def find_max():\n bridges = all_bridges\n bridges = [ b for b in bridges if b != None ]\n return max(bridges)",
"def Max(data):\n return data.max()",
"def get_max_score(location_list, grid, shape):",
"def task_7_max_value_list_of_lists(data: List[List[int]]) -> int:\n purified_from_empty_dicts = filter(list, data)\n return max(map(max, purified_from_empty_dicts), default=None)",
"def get_max_death(structure_list: List[str]) -> float:\n store_tuples = []\n for structure in structure_list:\n struct = unpickle_persistence_diagram(structure)\n oned_array = convert_persistence_diagrams_1d(struct)\n store_tuples.append(oned_array)\n\n # Combine into one array\n store_tuples = np.vstack(store_tuples)\n max_death = sorted(store_tuples, key=lambda x: x[1], reverse=True)[0]\n max_death = max_death[1]\n return max_death",
"def _multiple_values_max(self, maps, threshold):\r\n max_val = np.zeros((maps.shape[0], maps.shape[1]), dtype=np.float)\r\n for i in range(maps.shape[1]):\r\n cmin = np.min(maps[:,i])\r\n cmax = np.max(maps[:,i])\r\n limit = cmax - (cmax - cmin) * threshold[i]\r\n min_mask = maps[:,i] <= limit\r\n max_mask = maps[:,i] > limit\r\n # for an abundance map the delta is around [-1..1],\r\n # but it can be outside this interval, it's something\r\n # to test\r\n # a guard with a -10 value maybe ok.\r\n rmin = min_mask * -10\r\n max_val[:,i] = max_mask * maps[:,i] + rmin\r\n max_vec = np.max(max_val, axis=1)\r\n max_mask = max_vec > -10\r\n argmax = np.argmax(max_val, axis=1)\r\n return (argmax + 1) * max_mask",
"def _get_max_preds_3d(heatmaps):\n assert isinstance(heatmaps, np.ndarray), 'heatmaps should be numpy.ndarray'\n assert heatmaps.ndim == 5, 'heatmaps should be 5-ndim'\n N, K, D, H, W = heatmaps.shape\n heatmaps_reshaped = heatmaps.reshape((N, K, -1))\n idx = np.argmax(heatmaps_reshaped, 2).reshape((N, K, 1))\n maxvals = np.amax(heatmaps_reshaped, 2).reshape((N, K, 1))\n preds = np.zeros((N, K, 3), dtype=np.float32)\n _idx = idx[..., 0]\n preds[..., 2] = _idx // (H * W)\n preds[..., 1] = _idx // W % H\n preds[..., 0] = _idx % W\n preds = np.where(maxvals > 0.0, preds, -1)\n return preds, maxvals",
"def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])",
"def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])",
"def max(self):\n max_i = np.nanargmax(self.ys)\n return self.xs[max_i], self.ys[max_i]",
"def get_maximum_value(dataset):\n d = [int(i) for i in dataset if i.isdigit()]\n op = [o for o in dataset if o in ['*', '-', '+']]\n n = len(d)\n d.insert(0, None)\n op.insert(0, None)\n m = [[0 for x in range(n+1)] for y in range(n+1)]\n M = [[0 for x in range(n+1)] for y in range(n+1)]\n for i in range(1, n+1):\n m[i][i] = d[i]\n M[i][i] = d[i]\n for s in range(1, n):\n for i in range(1, n-s+1):\n j = i + s\n m[i][j], M[i][j] = min_and_max(i, j, op, m, M)\n return M[1][n]",
"def find_max(weather_data):\n if len(weather_data) == 0:\n return()\n\n value = float(weather_data[0])\n position = 0\n\n for index, weather in enumerate(weather_data):\n if float(weather) >= value:\n value= float(weather)\n position = index\n\n return(value, position)"
] |
[
"0.7007491",
"0.6992208",
"0.6961825",
"0.69369406",
"0.6824932",
"0.66760194",
"0.65985566",
"0.6580838",
"0.6564666",
"0.655405",
"0.6506097",
"0.6486567",
"0.64407647",
"0.64095294",
"0.6400788",
"0.63668877",
"0.6343359",
"0.63175714",
"0.63098747",
"0.63023937",
"0.6299065",
"0.6296102",
"0.62661326",
"0.6262078",
"0.6257792",
"0.62458396",
"0.62458396",
"0.62331593",
"0.62290055",
"0.6197138"
] |
0.86902034
|
0
|
This function returns the list of the first N prime numbers.
|
def liste_N_nb_premier(N):
liste = []
i = 0
while len(liste) < N:
if is_prime(i):
liste.append(i)
i += 1
return liste
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_primes(n):\n primeList = []\n for i in range(n):\n if is_prime(i):\n primeList.append(i)\n return primeList",
"def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]",
"def generate_prime_less_than_n(n):\n\tif n <= 1:\n\t\treturn []\n\tlist_of_primes = [2]\n\tfor i in range(3, n, 2):\n\t\tis_prime = True\n\t\tfor j in list_of_primes:\n\t\t\tif i%j == 0:\n\t\t\t\tis_prime = False\n\t\t\t\tbreak\n\t\tif is_prime:\n\t\t\tlist_of_primes.append(i)\n\treturn list_of_primes",
"def primes_less(n):\n test_nums = list(range(3, int(floor(sqrt(n))), 2))\n prime_flags = [True] * ((n - 2) // 2)\n for a in test_nums:\n next_div = a**2\n while next_div < n:\n prime_flags[(next_div-3)//2] = False\n next_div += 2*a\n return [2] + [2*i + 3 for i, flag in enumerate(prime_flags) if flag]",
"def getNPrime(num):\n prime_numbers = []\n for i in range(num):\n if isPrime(i + 1):\n prime_numbers.append(i)\n return prime_numbers",
"def get_primes(n):\n\n return list(primes_sieve(n))",
"def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list",
"def return_prime_numbers_less_tahn_100():\r\n primes = []\r\n for num in range(100):\r\n is_prime = True\r\n for i in range(2, num):\r\n if num % i == 0:\r\n is_prime = False \r\n if is_prime:\r\n primes.append(num)\r\n return primes",
"def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]",
"def primes(n):\n return [i for i, v in enumerate(prime_cache(n)) if v]",
"def first_n_primes(n): \n\tlist_of_primes = []\n\t# the current number that we're checking the primality of\n\tcandidate = 2\n\n\t# keep on finding primes until our list has enough elements\n\twhile len(list_of_primes) < n:\n\t\t# assume that we have a prime number\n\t\tis_prime = True\n\n\t\t# use trial division to determine if it's not prime\n\t\tfor i in range(2, candidate):\n\t\t\t# once we know it's not prime, break!\n\t\t\tif candidate % i == 0:\n\t\t\t\tis_prime = False\n\t\t\t\tbreak\n\t\tif is_prime:\n\t\t\tlist_of_primes.append(candidate)\n\t\tcandidate += 1\n\treturn list_of_primes",
"def list_primes(n):\n\tarr = [True] * n\n\tarr[0] = False\n\tarr[1] = False\n\tfor i in range(2, int(math.sqrt(n)) + 1):\n\t\tif is_prime(i):\n\t\t\tfor j in range(2 * i, n, i):\n\t\t\t\tarr[j] = False\n\tprimes = []\n\tfor i in range(len(arr)):\n\t\tif arr[i]:\n\t\t\tprimes.append(i)\n\treturn primes",
"def primes_lt3(N):\n\t# test every number less than N/2\n\tprimes = [ i for i in xrange(2,N)\n\t\t\t\t if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]\n\n\treturn primes",
"def primes_lt3(N):\n\t# test every number less than N/2\n\tprimes = [ i for i in xrange(2,N)\n\t\t\t\t if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]\n\n\treturn primes",
"def get_n_primes(n):\n\n primes = [' ']\n num = 2\n while len(primes) < n + 1:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes",
"def getListOfPrimes(k = 40, n = 1000000):\n\n low = 2 ** (k - 1) # smallest number k bits could be\n lim = min(int(math.sqrt(low)), n + 1) # we don't want to generate any primes larger than n\n\n numList = [True] * lim # initialise boolean list\n primes = [] # initialise list of primes\n\n for i in range(2, lim): # loop through list from index 2\n if numList[i]: # if it is True\n primes.append(i) # must be prime\n\n for j in range(i*i, lim, i): # loop through multiples\n numList[j] = False # setting them to false\n\n return primes # return ptimes",
"def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n",
"def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)",
"def primes_below(n):\n L, M = [2], [x for x in range(3, int(n), 2)]\n if n <= 2:\n print('There are no primes below 2')\n return None\n for i in range(3, int(n), 2):\n if M[i // 2 - 1] != 0 and is_prime(i):\n L.append(i)\n for j in range(i, int(n), 2 * i):\n M[j // 2 - 1] = 0\n return L",
"def primesList(n):\n sieve = [True]*n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[2*i::i] = [False]*(len(sieve[2*i::i]))\n return [2]+[i for i in range(3,n,2) if sieve[i]]",
"def primes(n, DEBUG=False):\n\n return [x[0] for x in enumerate(_sieve(n, DEBUG=DEBUG)[0:n+1]) if x[1]]",
"def primes(n):\n if n == 0 or n == 1:\n return []\n else:\n p = primes(int(sqrt(n)))\n no_p = { j for i in p for j in xrange(i*2, n+1, i) }\n p = { x for x in xrange(2, n + 1) if x not in no_p }\n return p",
"def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result",
"def find_first_n_primes(n):\n primes = []\n to_check = 2\n\n while len(primes) < n:\n if is_prime(to_check, primes):\n primes.append(to_check)\n to_check += 1\n print('The first {} prime numbers are: {}'.format(n, primes))\n return set(primes)",
"def primesupto(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]",
"def find_n_primes(n):\n primes = [ ]\n\n if n < 2:\n return None;\n\n primes.append(2)\n\n for i in range(3, n + 1, 2):\n is_prime = True\n for p in primes:\n if i % p is 0:\n is_prime = False\n continue\n if is_prime:\n primes.append(i)\n return primes",
"def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]",
"def get_primes(n):\n primes = [True] * (n / 2)\n for i in range(int((n / 2 - 1) / 2) >> 1):\n for j in range((i * (i + 3) << 1) + 3, n / 2, (i << 1) + 3): \n primes[j] = False\n return [2] + [((i << 1) + 3) for i in range(n / 2) if (primes[i])]",
"def get_prime_list(low, num):\r\n prime_gen = prime_generator()\r\n current_prime = 2\r\n\r\n while current_prime < low:\r\n current_prime = next(prime_gen)\r\n\r\n result = [current_prime]\r\n for i in range(num):\r\n result.append(next(prime_gen))\r\n\r\n return result",
"def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]"
] |
[
"0.79166585",
"0.78252643",
"0.77794",
"0.7731997",
"0.7727879",
"0.76638025",
"0.7577047",
"0.7568607",
"0.7564964",
"0.7522414",
"0.7501913",
"0.7497206",
"0.74915093",
"0.74915093",
"0.74238646",
"0.7420592",
"0.7341136",
"0.7333776",
"0.73223823",
"0.7292376",
"0.7289568",
"0.7284872",
"0.7264965",
"0.7258673",
"0.72484946",
"0.7241017",
"0.72115237",
"0.71458465",
"0.71438944",
"0.71362454"
] |
0.7917129
|
0
|
This function checks whether or not a number is truncatable, i.e. every number obtained by removing digits from the front or the back is still a prime number.
|
def is_truncatable(nb):
nb = str(nb)
if is_prime(int(nb)):
for i in range(1, len(nb)):
if not is_prime(int(nb[i:])) or not is_prime(int(nb[:len(nb)-i])):
return False
return True
else:
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_truncatable(number: int):\n\n str_number = str(number)\n index = 0\n\n # Left shift:\n while index < len(str_number):\n if not is_prime(int(str_number[index:])):\n return False\n\n index += 1\n\n # Right shift:\n index = len(str_number)\n while index > 0:\n if not is_prime(int(str_number[:index])):\n return False\n\n index -= 1\n\n return True",
"def is_left_right_truncatable(number_str, prime_str_set):\n l = len(number_str)\n #left truncatable?\n for i in range(l):\n if number_str[i:] not in prime_str_set or number_str[:l-i] not in prime_str_set:\n return False\n return True",
"def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True",
"def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True",
"def is_superprime(x: int) -> bool:\n if x <= 0:\n return False\n\n while x:\n if is_prime(x) == False:\n return False\n x //= 10\n return True",
"def isprime(number):\n\n if number == 1:\n return False\n for i in range(2, int(number**0.5) + 1):\n if number % i == 0:\n return False\n return True",
"def isprime(number: int) -> bool:\n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n return True",
"def is_prime(number):\n\tif number < 0:\n\t\treturn False\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True",
"def is_prime(number):\n if number <= 1:\n return False\n\n max_element = int(math.ceil(math.sqrt(number)))\n # iterate through all elements from 2 through sqrt(n)\n for element in range(2,max_element + 1):\n if number % element == 0:\n return False\n\n return True",
"def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n return True",
"def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True",
"def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False",
"def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True",
"def is_simple_number(x: int):\n divisor = 2\n while divisor < x:\n if x % divisor == 0:\n return False\n divisor += 1\n return True",
"def is_prime(num):\n if not isinstance(num, int):\n return False\n if num <= 1:\n return False\n if num == 2 or num == 3:\n return True\n if num % 6 in [0, 2, 3, 4]:\n return False\n div_max = int(math.sqrt(num))\n for div in range(5, div_max + 1, 2):\n if num % div == 0:\n return False\n return True",
"def is_prime(number):\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True",
"def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True",
"def is_prime(num):\n # 2 is prime; exclude\n if num == 2: \n return True\n \n # exclude all other even numbers and numbers less than 2\n if num % 2 == 0 or num < 2:\n return False\n \n # Only need to count up to the the square root of num\n sqrt = int(num ** 0.5 +1) # int rounds down; correct by +1\n \n # Loop through all odd numbers\n for i in range(3, sqrt, 2):\n if num % i == 0:\n return False\n return True",
"def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True",
"def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res",
"def truncatable_primes():\n list_tp = []\n i = 8\n while len(list_tp) < 11:\n if is_truncatable(i):\n list_tp.append(i)\n i += 1\n if i % 100 == 0:\n print(\"i : \", i)\n return list_tp, sum(list_tp)",
"def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))",
"def get_prime_digits_for_one(a: int) -> bool:\r\n b = a\r\n c = 0\r\n c1 = 0\r\n while b > 0:\r\n c1 += 1\r\n n = b % 10\r\n if isprime(n):\r\n c += 1\r\n b = b // 10\r\n if c == c1:\r\n return True\r\n else:\r\n return False",
"def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True",
"def isprime(x):\n if x <= 1: return False \n if x % 2 == 0: return x == 2\n for k in range(3, int(sqrt(x))+1, 2): \n if x % k == 0: return False\n return True",
"def is_prime(n):\n if n <= 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n\n limit = int(math.floor(math.sqrt(n)))\n i = 5\n while i <= limit:\n if n % i == 0:\n return False\n if n % (i + 2) == 0:\n return False\n i += 6\n return True",
"def is_prime(x):\n if x < 2:\n return False\n for i in range(2, x // 2 + 1):\n if x % i == 0:\n return False\n return True",
"def is_prime(num):\n if num < 2:\n return False\n elif num == 2:\n return True\n\n for i in range(2, int(num**(1/2))+1):\n if num % i == 0:\n return False\n\n return True",
"def is_prime(x: int) -> bool:\n if x < 2:\n return False\n if x != 2 and x % 2 == 0:\n return False\n for i in range(3, x // 2 + 1):\n if x % i == 0:\n return False\n return True",
"def isprime(checknumber):\n isprime = 0\n if checknumber % 2 == 0:\n if checknumber != 2:\n return False\n else:\n x = 3\n while x <= int(math.sqrt(checknumber)):\n if checknumber % x == 0:\n return False\n x += 2\n return True"
] |
[
"0.792935",
"0.6656063",
"0.64213675",
"0.63436365",
"0.62462485",
"0.6234058",
"0.6154132",
"0.61109185",
"0.6067006",
"0.6060549",
"0.6056523",
"0.60431683",
"0.60208267",
"0.6009649",
"0.5997058",
"0.59954643",
"0.59928215",
"0.597895",
"0.5948257",
"0.59479547",
"0.59429777",
"0.594072",
"0.59052485",
"0.5904013",
"0.5897508",
"0.5867128",
"0.585656",
"0.5852071",
"0.5836299",
"0.5833261"
] |
0.80308694
|
0
|
This function looks for the 11 truncatable prime numbers greater than 7 and returns them together with their sum.
|
def truncatable_primes():
list_tp = []
i = 8
while len(list_tp) < 11:
if is_truncatable(i):
list_tp.append(i)
i += 1
if i % 100 == 0:
print("i : ", i)
return list_tp, sum(list_tp)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def problem_52():\n\n for number in xrange(1, 123456789):\n sorted_num = ''.join(sorted(str(number)))\n if len([value for value in xrange(2, 7)\n if ''.join(sorted(str((value * number)))) == sorted_num]) == 5:\n return number",
"def has_seven(k):\n \n if k % 10 == 7:\n return True\n else:\n if k<10:\n return False\n return has_seven(k//10)",
"def has_seven(k):\n if k == 0:\n return False\n else:\n if k%10 == 7:\n return True\n return has_seven(k//10)",
"def has_seven(k):\n if k % 10 == 7:\n return True\n elif k < 10:\n return False\n else:\n return has_seven(k // 10)",
"def has_seven(k):\n if k % 10 == 7:\n return True\n elif k < 10:\n return False\n else:\n return has_seven(k // 10)",
"def has_seven(k):\n if k % 10 == 7:\n return True\n elif k < 10:\n return False\n else:\n return has_seven(k // 10)",
"def has_seven(k):\n\tif k % 10 == 7:\n\t\treturn True\n\telif k < 10:\n\t\treturn False\n\telse:\n\t\treturn has_seven(k // 10)",
"def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double",
"def num_sevens(n):\n if n < 10 and n != 7:\n return 0\n else:\n return (n%10 == 7) + num_sevens(n//10)",
"def auto_truncate(val):\n return val[:7]",
"def seven_boom(end_number):\r\n all_nums = []\r\n for n in range(1, end_number + 1):\r\n if n % 7 == 0 or \"7\" in str(n):\r\n all_nums.append(\"Boom\")\r\n else:\r\n all_nums.append(n)\r\n return all_nums",
"def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x",
"def lastTen(self, num, length):\n\t\tif (length-num <=10):\n\t\t\treturn 1\n\t\treturn 0",
"def is_truncatable(nb):\n nb = str(nb)\n if is_prime(int(nb)):\n for i in range(1, len(nb)):\n if not is_prime(int(nb[i:])) or not is_prime(int(nb[:len(nb)-i])):\n return False\n return True\n else:\n return False",
"def trim_decreasing_digits(self):\n vals_to_del = defaultdict(list)\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n if choice < int(str(key)[-1]):\n vals_to_del[key].append(choice)\n for key in vals_to_del:\n for val in vals_to_del[key]:\n self.Poss_Tree[key].remove(val)",
"def is_desc(x):\n while x > 9:\n if x % 10 > x // 10 % 10:\n return False\n x = x // 10\n return True",
"def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0",
"def first_last6(nums):\n if len(nums) == 1:\n if nums.count(6) > 0:\n return True\n else:\n return False\n if nums.pop(0) == 6 or nums.pop(len(nums)-1) == 6:\n return True\n else:\n return False",
"def test_diffrent_truncation_points_properly_fixed(self):\n for index in range(15990, 16000):\n simpletest = self.simpleTestString[0:index]\n lastIndex = simpletest.rfind(\">\")\n simpletest = simpletest[0:lastIndex + 1]\n simpletest = sanitizeFeedback(simpletest)\n complextest = self.complicatedString[0:index]\n lastIndex = complextest.rfind(\">\")\n complextest = complextest[0:lastIndex + 1]\n complextest = sanitizeFeedback(complextest)\n for (openTag, closedTag) in self.tags:\n self.assertNumTagsEqual(openTag, closedTag, simpletest)\n self.assertNumTagsEqual(openTag, closedTag, complextest)",
"def test_half_records_outside_workdays_bottom_range(self):\n input_ = [\n # First record is outside range\n self.indicator_record(date=datetime.date(2000, 12, 29), value=0.058366),\n # Second record is inside range\n self.indicator_record(date=datetime.date(2001, 1, 2), value=0.058400),\n ]\n output = self.expander._daily_workday_indicator_expander(input_)\n expected = 32\n actual = len(output)\n\n self.assertEqual(expected, actual)",
"def is_truncatable(number: int):\n\n str_number = str(number)\n index = 0\n\n # Left shift:\n while index < len(str_number):\n if not is_prime(int(str_number[index:])):\n return False\n\n index += 1\n\n # Right shift:\n index = len(str_number)\n while index > 0:\n if not is_prime(int(str_number[:index])):\n return False\n\n index -= 1\n\n return True",
"def remove_numbers(self):\n for i in range(len(self.board.board[0])):\n while self.board.board[i].count(0) < 6:\n random_val = random.randint(0, 8)\n self.board.update_board((i, random_val), 0)",
"def has_small_digits(n,maxdigit):\n digits = [int(num) for num in str(n)]\n return all([num <= maxdigit for num in digits])",
"def main():\r\n lst = list(map(int, list(str(NUMBER))))\r\n product = 0\r\n\r\n for i in range(len(lst)):\r\n\r\n if i + 13 >= len(lst):\r\n break\r\n\r\n thirteen = lst[i:i + 13]\r\n\r\n if prod(thirteen) > product:\r\n product = prod(thirteen)\r\n\r\n print(f'{\" × \".join(list(map(str, thirteen)))} = {product}')",
"def check_digit(tracking_number):\n check_digit = 10 - ((sum(itertools.starmap(operator.mul, zip(itertools.cycle((3, 1)), map(int, str(tracking_number))))) + 1) % 10)\n if check_digit == 10:\n check_digit = 0\n return check_digit",
"def fix_teen(n):\n x = 0\n\n while(x < len(n)):\n if(n[x] in(13, 14, 17,18,19)):\n n[x] = 0\n x = x + 1\n \n return n",
"def _remove_blanks(all_icons: List[numpy.ndarray]) -> List[numpy.ndarray]:\n filtered_icons = []\n for icon in all_icons:\n if icon[20:60, 20:60, 2].min() > 100:\n continue\n filtered_icons.append(icon)\n return filtered_icons",
"def istele(number):\n if number[:3] == '140':\n return True\n return False",
"def fix_teen(n):\n if 13<=n<=14 or 17<=n<=19:\n return 0\n else:\n return n",
"def must_redact(df):\n return df.le(SMALL_NUMBER).any()[0]"
] |
[
"0.57979983",
"0.56591105",
"0.5636077",
"0.55226547",
"0.55226547",
"0.55226547",
"0.5470952",
"0.53913707",
"0.5387149",
"0.5335019",
"0.52921695",
"0.52763057",
"0.52684706",
"0.5260989",
"0.5238879",
"0.522795",
"0.52069545",
"0.5164485",
"0.51632535",
"0.5145592",
"0.5118848",
"0.51038945",
"0.51013327",
"0.5099992",
"0.509725",
"0.5066842",
"0.5058849",
"0.50398165",
"0.50294906",
"0.5028028"
] |
0.5993087
|
0
|
The datetime of the last ping from the SrcSink.
|
def last_ping(self) -> datetime:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_source_stamp(self):",
"def get_source_stamp(self):",
"def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")",
"def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")",
"def dest_time(self) -> float:\n return ntp_to_system_time(self.dest_timestamp)",
"def last_config_send_time(self) -> str:\n return pulumi.get(self, \"last_config_send_time\")",
"def dt_last_update(self):\n return self.last_update",
"def get_last_timestamp(self):\n return self._frame_timestamp",
"def getSourceStamp():\n pass",
"def getSourceStamp():\n pass",
"def recv_ts(self) -> int:\n pass",
"def duration(self):\n return (self.fcip_doc[\"latest_timestamp\"] - self.fcip_doc[\"packet_timestamps\"][0])",
"def last_timestamp(self):\n return self._last_timestamp",
"def get_last_update_time(self):\n return self.last_update_time",
"def last_update_time(self):\n return self._last_update_time",
"def last_heartbeat_date_time(self):\n if \"lastHeartbeatDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastHeartbeatDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None",
"def get_timestamp(self):\n return datetime.datetime.utcnow()",
"def last_updated(self):\n try:\n return max(self.station_usage, key=lambda x: x.last_update).dt_last_update\n except ValueError:\n return datetime.fromtimestamp(0)",
"def get_last_time(self):\n \n return self._last",
"def last_updated_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_updated_time\")",
"def lasttime(self):\n if hasattr(self, \"_lasttime\"):\n return self._lasttime\n else:\n return None",
"def get_last_refreshed_on_time():\n last_checked_on = Feed.select().aggregate(fn.Max(Feed.last_checked_on))\n if last_checked_on: \n return datetime_as_epoch(last_checked_on)\n \n # Return a fallback value\n return datetime_as_epoch(datetime.utcnow())",
"def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time",
"def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time",
"def last_update(self):\n date, time = self.data.get(\"update_date\"), self.data.get(\"update_time\")\n if date is not None and time is not None:\n return datetime.strptime(date + time, \"%d-%m-%Y%H:%M\").replace(\n tzinfo=VIENNA_TIME_ZONE\n )",
"def get_datetime(self):\n timestamp = self.SendTime + self.SendTimeNS / 1000000000.0\n return datetime.datetime.utcfromtimestamp(timestamp)",
"def last_heartbeat_time(self) -> str:\n return pulumi.get(self, \"last_heartbeat_time\")",
"def timestamp(self):\n return self.__timestamp",
"def last_updated_time(self) -> str:\n return pulumi.get(self, \"last_updated_time\")",
"def last_count_update_time(self):\n return self.__last_count_update_time"
] |
[
"0.6409875",
"0.6409875",
"0.62572974",
"0.62572974",
"0.6114063",
"0.60457677",
"0.60435784",
"0.5990316",
"0.59827757",
"0.59827757",
"0.5948047",
"0.59379625",
"0.59183204",
"0.5909179",
"0.589342",
"0.58412385",
"0.5840549",
"0.5835009",
"0.58120304",
"0.58074075",
"0.57887185",
"0.5759026",
"0.57268685",
"0.57268685",
"0.57198447",
"0.5717211",
"0.5702144",
"0.5689329",
"0.56841624",
"0.5671722"
] |
0.7502264
|
0
|
The SrcSink that the metadata relates to.
|
def srcsink(self) -> SrcSink:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def Source(self):\r\n\t\treturn self._get_attribute('source')",
"def get_sink(self, **kwargs: Dict) -> Sink:\n if kwargs[\"format\"] in SINK_MAP:\n s = SINK_MAP[kwargs[\"format\"]]\n return s(self, **kwargs)\n else:\n raise TypeError(f\"{kwargs['format']} in an unrecognized format\")",
"def source(self):\n return self._group.stream",
"def get_source(self):\n return self.source",
"def get_source(self):",
"def getSource(self):\n return self.source",
"def source(self):\n return self._client.group.stream",
"def source(self) -> Dict:\n return self._db_data.metadata[\"_source\"]",
"def source(self):\n return self._source",
"def source(self):\n return self._source",
"def source(self):\n return self._source",
"def source(self):\n return self._source",
"def source(self):\n return self._source",
"def source(self):\n return self.__source",
"def destination(self):\n return self._destination",
"def source(self) -> Station:\n return self._source",
"def URL(self):\n return self._sourceurl",
"def get_media_destination(self):\n assert self.__context is not None\n inner_media_destination = self._media_point.get_media_destination()\n dtmf_media_destination = self.__dtmf_sender.get_media_destination()\n dtmf_media_source = self.__dtmf_sender.get_media_source()\n self.__inner_link = self.__transcoding_factory.create_link(self.__context, dtmf_media_source, inner_media_destination)\n return dtmf_media_destination",
"def source(self) -> str:\n return pulumi.get(self, \"source\")",
"def source(self) -> str:\n return pulumi.get(self, \"source\")",
"def source(self) -> str:\n return pulumi.get(self, \"source\")",
"def source(self) -> str:\n return pulumi.get(self, \"source\")",
"def source(self) -> str:\n return pulumi.get(self, \"source\")",
"def source(self) -> str:\n return pulumi.get(self, \"source\")",
"def getSource(self):\n return self.__source",
"async def stream_source(self) -> str:\n if not self._stream_enabled:\n return None\n return self._stream_source",
"def get_destination(self):\n\n return self.destination",
"def source(self) -> DatasetSource:\n return self._source",
"def getDestination(self):\n return self.__destination",
"def source(self) -> str:\n return self._source"
] |
[
"0.61999077",
"0.6132542",
"0.5978382",
"0.5866137",
"0.5770781",
"0.57464147",
"0.57042056",
"0.5678696",
"0.5667629",
"0.5667629",
"0.5667629",
"0.5667629",
"0.5667629",
"0.5651825",
"0.5609882",
"0.5577742",
"0.55600435",
"0.55342245",
"0.5527913",
"0.5527913",
"0.5527913",
"0.5527913",
"0.5527913",
"0.5527913",
"0.552537",
"0.5503377",
"0.55006933",
"0.5499222",
"0.5445466",
"0.54322505"
] |
0.7397219
|
0
|
Normalize Base String URI per `Section 3.4.1.2`_.
|
def normalize_base_string_uri(uri, host=None):
uri = to_unicode(uri)
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
# The scheme, authority, and path of the request resource URI `RFC3986`
# are included by constructing an "http" or "https" URI representing
# the request resource (without the query or fragment) as follows:
#
# .. _`RFC3986`: https://tools.ietf.org/html/rfc3986
if not scheme or not netloc:
raise ValueError('uri must include a scheme and netloc')
# Per `RFC 2616 section 5.1.2`_:
#
# Note that the absolute path cannot be empty; if none is present in
# the original URI, it MUST be given as "/" (the server root).
#
# .. _`RFC 2616 section 5.1.2`: https://tools.ietf.org/html/rfc2616#section-5.1.2
if not path:
path = '/'
# 1. The scheme and host MUST be in lowercase.
scheme = scheme.lower()
netloc = netloc.lower()
# 2. The host and port values MUST match the content of the HTTP
# request "Host" header field.
if host is not None:
netloc = host.lower()
# 3. The port MUST be included if it is not the default port for the
# scheme, and MUST be excluded if it is the default. Specifically,
# the port MUST be excluded when making an HTTP request `RFC2616`_
# to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
# All other non-default port numbers MUST be included.
#
# .. _`RFC2616`: https://tools.ietf.org/html/rfc2616
# .. _`RFC2818`: https://tools.ietf.org/html/rfc2818
default_ports = (
('http', '80'),
('https', '443'),
)
if ':' in netloc:
host, port = netloc.split(':', 1)
if (scheme, port) in default_ports:
netloc = host
return urlparse.urlunparse((scheme, netloc, path, params, '', ''))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _urlnorm(self, uri):\r\n (scheme, authority, path, query, fragment) = parse_uri(uri)\r\n if not scheme or not authority:\r\n raise Exception(\"Only absolute URIs are allowed. uri = %s\" % uri)\r\n authority = authority.lower()\r\n scheme = scheme.lower()\r\n if not path:\r\n path = \"/\"\r\n\r\n # Could do syntax based normalization of the URI before\r\n # computing the digest. See Section 6.2.2 of Std 66.\r\n request_uri = query and \"?\".join([path, query]) or path\r\n scheme = scheme.lower()\r\n defrag_uri = scheme + \"://\" + authority + request_uri\r\n\r\n return defrag_uri",
"def normalize_uri(uri):\n if isinstance(uri, str):\n uri = uri.decode('utf-8')\n return uri.strip().replace(u' ', u'_')",
"def normalize_uri(uri):\n return normalize_uri_result(uri).unsplit()",
"def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri",
"def normalize_url(self, url):\n pass",
"def norm(url):\n url = _unicode(url) # operate on unicode strings\n url_tuple = urlparse(url)\n normalized_tuple = norm_tuple(*url_tuple)\n return urlunparse(normalized_tuple).replace(' ','%20')",
"def url_norm(url, encoding=None, strip=False, lowercase_path=False, remove_fragment=False):\n\n if strip:\n url = url.strip()\n\n if isinstance(url, unicode):\n # try to decode the URL to ascii since urllib.unquote()\n # handles non-unicode strings differently\n try:\n url = url.encode('ascii')\n except UnicodeEncodeError:\n pass\n encode_unicode = True\n else:\n encode_unicode = False\n urlparts = list(urlparse.urlsplit(url))\n\n #fix missing scheme\n if not urlparts[0] or not urlparts[1]:\n urlparts = list(fix_missing_scheme(url, urlparts))\n elif urlparts[0] not in default_scheme_for_port:\n # Todo: find the scheme with the min edit distance\n pass\n\n # scheme\n if not http_scheme_pattern.match(urlparts[0]):\n raise InvalidUrl(url)\n\n urlparts[0] = urllib.unquote(urlparts[0]).lower()\n # host (with path or query side effects)\n is_idn = url_fix_host(urlparts)\n # query\n urlparts[3] = url_parse_query(urlparts[3], encoding=encoding)\n if urlparts[0] in urlparse.uses_relative:\n # URL has a hierarchical path we should norm\n if not urlparts[2]:\n # Empty path is allowed if both query and fragment are also empty.\n # Note that in relative links, urlparts[0] might be empty.\n # In this case, do not make any assumptions.\n if urlparts[0] and (urlparts[3] or urlparts[4]):\n urlparts[2] = '/'\n else:\n # fix redundant path parts\n urlparts[2] = collapse_segments(urlparts[2])\n if not remove_fragment:\n # anchor\n urlparts[4] = urllib.unquote(urlparts[4])\n # quote parts again\n urlparts[0] = url_quote_part(urlparts[0], encoding=encoding) # scheme\n urlparts[1] = url_quote_part(urlparts[1], safechars='@:', encoding=encoding) # host\n urlparts[2] = url_quote_part(urlparts[2], safechars=_nopathquote_chars, encoding=encoding) # path\n\n if lowercase_path:\n urlparts[2] = urlparts[2].lower()\n\n if remove_fragment:\n urlparts[4] = ''\n else:\n urlparts[4] = url_quote_part(urlparts[4], encoding=encoding) # anchor\n\n if not urlparts[2]:\n urlparts[2] = '/'\n\n res = urlunsplit(urlparts)\n\n if encode_unicode:\n res = unicode(res)\n return res, is_idn",
"def get_normalized_url(url):\r\n scheme, netloc, path, params, query, fragment = urlparse(url)\r\n\r\n # Exclude default port numbers.\r\n if scheme == 'http' and netloc[-3:] == ':80':\r\n netloc = netloc[:-3]\r\n elif scheme == 'https' and netloc[-4:] == ':443':\r\n netloc = netloc[:-4]\r\n if scheme not in ('http', 'https'):\r\n raise ValueError(\"Unsupported URL %s (%s).\" % (url, scheme))\r\n\r\n # Normalized URL excludes params, query, and fragment.\r\n return urlunparse((scheme, netloc, path, None, None, None))",
"def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))",
"def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)",
"def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url",
"def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url",
"def normalizeURIPath(path):\n ret = libxml2mod.xmlNormalizeURIPath(path)\n return ret",
"def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]",
"def test_normalized_urls():\n assert normalize_url(\"http://example.com/\") == \"http://example.com/\"",
"def normalize_url(url):\n parse = urlparse(url)\n\n # netloc should be lowercase\n netloc = parse.netloc.lower()\n if parse.scheme == \"http\":\n if netloc.endswith(\":80\"):\n netloc = netloc[:-3]\n\n elif parse.scheme == \"https\" and netloc.endswith(\":443\"):\n netloc = netloc[:-4]\n\n # add a '/' at the end of the netloc if there in no path\n if not parse.path:\n netloc = netloc + \"/\"\n\n return \"{}://{}{}\".format(parse.scheme, netloc, parse.path)",
"def normalize_url(node):\n if not node:\n node = DEFAULT_NODE\n elif '://' not in node:\n node = '//{}'.format(node)\n parts = urlparse(node, scheme='http', allow_fragments=False)\n port = parts.port if parts.port else _get_default_port(parts.scheme)\n netloc = '{}:{}'.format(parts.hostname, port)\n return urlunparse((parts.scheme, netloc, parts.path, '', '', ''))",
"def convert_single_relation_url_to_simplified_format(relation_url):\n relation_url = relation_url.strip()\n prefix = 'www.freebase.com/'\n if not relation_url.startswith(prefix):\n raise Exception(\"Invalid format of relation '{}', expected prefix '{}'\".format(relation_url, prefix))\n return relation_url[len(prefix):].replace('/', '.').strip()",
"def normalize_for_url(text: str) -> str:\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean",
"def normalize_uri_result(uri):\n ref = uri_reference(uri).normalize()\n\n return ref._replace(\n authority=normalize_uri_authority(ref),\n query=normalize_uri_query(ref),\n path=normalize_uri_path(ref),\n )",
"def NormalizeLocation (uri, parent_uri=None, prefix_map=None):\n if uri is None:\n return uri\n if parent_uri is None:\n abs_uri = uri\n else:\n abs_uri = urlparse.urljoin(parent_uri, uri)\n if prefix_map is None:\n prefix_map = LocationPrefixRewriteMap_\n for (pfx, sub) in six.iteritems(prefix_map):\n if abs_uri.startswith(pfx):\n abs_uri = sub + abs_uri[len(pfx):]\n if 0 > abs_uri.find(':'):\n abs_uri = os.path.realpath(abs_uri)\n return abs_uri",
"def test_non_ideal_inputs():\n assert normalize_url(\"example.com\") == \"http://example.com/\"\n assert normalize_url(\"example.com/abc\") == \"http://example.com/abc\"\n assert normalize_url(\"//example.com/abc\") == \"http://example.com/abc\"",
"def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n # Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return 
result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result",
"def test_path_percent_encoding():\n assert (normalize_url(\"http://example.com/hello world{}\") ==\n \"http://example.com/hello%20world%7B%7D\")",
"def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")",
"def test_normalize_percent_encoding_in_querystring():\n assert (normalize_url(\"http://example.com/?a=b%c2\") ==\n \"http://example.com/?a=b%C2\")",
"def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url",
"def _update_url_scheme(self, url):\n if self.base_scheme and not url.startswith(\"%s://\" % self.base_scheme):\n # url_split = urlparse.urlsplit(url)\n url_split = urlsplit(url)\n # url = urlparse.urlunsplit(\n url = urlunsplit(\n [\n self.base_scheme,\n url_split.netloc,\n url_split.path,\n url_split.query,\n url_split.fragment\n ]\n )\n return url",
"def test_dont_percent_encode_safe_chars_query():\n assert (normalize_url(\"http://example.com/a/?face=(-.-)\") ==\n \"http://example.com/a?face=(-.-)\")",
"def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))"
] |
[
"0.7852068",
"0.7516662",
"0.74561626",
"0.7409289",
"0.71349496",
"0.7097236",
"0.6995842",
"0.68973535",
"0.6676458",
"0.65969145",
"0.6584086",
"0.65773904",
"0.6558476",
"0.65522295",
"0.6540962",
"0.64854604",
"0.64476895",
"0.6379877",
"0.6377486",
"0.6352593",
"0.6327102",
"0.6302892",
"0.62889",
"0.62286705",
"0.6222184",
"0.61811054",
"0.61796737",
"0.61454004",
"0.60793006",
"0.6074201"
] |
0.7584172
|
1
|
Generate signature base string from request.
|
def generate_signature_base_string(request):
host = request.headers.get('Host', None)
return construct_base_string(
request.method, request.uri, request.params, host)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _GenerateSignatureBaseString(self, method, request_url_base, params):\n return self._EscapeAndJoin([method, request_url_base,\n self._FormatUrlParams(params)])",
"def signature(request) -> str:\n return get_test_data(request, __name__, \"signature\", \"r\")",
"def _buildSignatureString(self):\n self.params=self.kargs\n \n try: method_details=self.MMAP[self.method]\n except: \n raise RuntimeError(\"unsupported method\")\n \n api_key_required=method_details[\"api_key_required\"]\n if api_key_required:\n self.params.update({\"api_key\": self.API_KEY, \"method\":self.method})\n \n signature_required=method_details[\"signature_required\"]\n if not signature_required:\n self.signature_string=\"\"\n return\n \n sorted_keys=self.params.keys().sort()\n \n str=\"\"\n try:\n for key in sorted_keys:\n if key not in self.PARAMS_TO_EXCLUDE_FROM_SIGNATURE:\n \n ## assume the parameter's value is valid\n try: \n if key not in self.PARAMS_TO_EXCLUDE_FROM_UTF8_ENCODING:\n value=self.params[key].encode(\"utf-8\")\n else:\n value=self.params[key]\n except: value=self.params[key]\n str=\"%s%s\" % (key, value)\n except:\n pass\n \n str += self.API_SECRET\n m=hashlib.md5()\n m.update(str)\n self.signature_string=m.hexdigest()\n \n self.kargs.update({\"api_sig\": self.signature_string})",
"def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id",
"def sign(self, request, consumer, token):\r\n key, raw = self.signing_base(request, consumer, token)\r\n hashed = hmac.new(key, raw, sha)\r\n # Calculate the digest base 64.\r\n return binascii.b2a_base64(hashed.digest())[:-1]",
"def signing_base(self, request, consumer, token):\r\n sig = (\r\n escape(request.method),\r\n escape(OAuthHook.get_normalized_url(request.url)),\r\n escape(OAuthHook.get_normalized_parameters(request)),\r\n )\r\n\r\n key = '%s&' % escape(consumer.secret)\r\n if token is not None:\r\n key += escape(token.secret)\r\n raw = '&'.join(sig)\r\n return key, raw",
"def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()",
"def _gen_api_sig(self, endpoint: str) -> str:\n return hmac.new(self._api_secret.encode(),\n endpoint.encode(),\n hashlib.sha512).hexdigest()",
"def _sign(self, oauth_payload, request):\n\t\t# merge params\n\t\t# use oauth_payload to update request params might avoid \n\t\t# some oauth params's accidental overriding\n\t\tpayload = dict( request.params )\n\t\tpayload.update( oauth_payload )\n\n\t\t# here I assume that all keys contain only 'a-zA-Z_.-'\n\t\t# thus there is no necessity to percent-encode them\n\t\t# will now sort them according to their original value\n\n\t\tkeylist = sorted( payload.keys() )\n\t\trawlist = []\n\t\tfor k in keylist:\n\t\t\tencoded_value = percent_encode( payload[k] )\n\t\t\trawlist.append( \"%s=%s\" % (k, encoded_value) )\n\n\t\t# craft base string\n\t\tbase_string = request.method.upper()\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode(request.base_url)\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode( '&'.join( rawlist ) )\n\n\t\tself._print( \"Base string:\\n\" + base_string )\n\t\t# craft signing key\n\t\tif self.has_user():\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.a_secret) )\n\t\telse:\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.token_secret) )\n\n\t\t# sign base_string\n\t\thashed = hmac.new(signing_key, base_string, hashlib.sha1)\n\t\tsignature = binascii.b2a_base64(hashed.digest())[:-1]\n\t\t\n\t\t# append signature field\n\t\toauth_payload[\"oauth_signature\"] = signature\n\n\t\t# prepare relevant oauth values\n\t\toauth_entry = []\n\t\tfor k in oauth_payload.keys():\n\t\t\tencoded_value = percent_encode( oauth_payload[k] )\n\t\t\toauth_entry.append( '%s=\"%s\"' % (k, encoded_value) )\n\n\t\toauth_str = 'OAuth ' + ','.join(oauth_entry)\n\t\tself._print( \"OAuth header:\\n\" + oauth_str )\n\t\t# field crafted\n\t\treturn { \"Authorization\" : oauth_str }",
"def create_signature(self, string_to_sign: str) -> str:\n begin_signature = hmac.new(key=base64.b64decode(self.secret),\n msg=string_to_sign.encode(),\n digestmod=hashlib.sha1)\n end_signature = begin_signature.digest()\n final_signature = base64.b64encode(end_signature).decode()\n return final_signature",
"def calculate_client_signature(self):\n # NB: do not check for self.user_agent, because it can be empty.\n if self.ip_address and self.path_qs:\n resource = self.path_qs\n user_agent = self.user_agent\n key_max_size = 250\n\n # At least 40 characters are allocated to the resource part.\n res_min_size = 40\n res_max_size = key_max_size - len(self.ip_address) - len(\n resource) - 2\n\n if res_max_size < res_min_size:\n res_max_size = res_min_size\n\n if len(resource) > res_max_size:\n resource = resource[:res_max_size]\n\n # The remaining length is available for the User Agent part\n ua_max_size = (\n key_max_size - len(self.ip_address) - len(resource) - 2)\n if len(user_agent) > ua_max_size:\n user_agent = user_agent[:ua_max_size]\n\n key = \"%s#%s#%s\" % (self.ip_address, user_agent, resource)\n\n return key\n return ''",
"def get_signature_prefix(self, sig):\n return ''",
"def __sign(self, request_type, endpoint, content=None):\n\t\trequest = request_type + \"\\n\" + endpoint + \"\\n\" + content\n\t\tmac = hmac.new(\n\t\t\tself.api_secret.encode('utf-8'),\n\t\t\trequest.encode('utf-8'),\n\t\t\tdigestmod=hashlib.sha256\n\t\t).hexdigest()\n\t\treturn base64.b64encode(mac.encode('utf-8'))",
"def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)",
"def _generate_signature(self):\n self.logger.debug(f'body payload {self.body_payload}')\n return hmac.new(self.__decrypted_secret, self.body_payload, hashlib.sha1).hexdigest()",
"def build_signature(method, url, oauth_params, params={}):\n\t# Copy params to prevent modification from original params\n\tall_params = copy.deepcopy(oauth_params)\n\t# Combine OAuth parameters and original parameters\n\tall_params.update(params)\n\t# Sort, stringify, and encode all parameters\n\tkeys = sorted(all_params.keys())\n\tencoded_params = ''\n\tfor key in keys:\n\t\tencoded_params += key+'='+percent_encode(str(all_params[key]))+'&'\n\tencoded_params = encoded_params[:-1]\n\tbase_string = method.upper()+'&'+percent_encode(url)+'&'+percent_encode(encoded_params)\n\t# Request crypt calculation to the server and return caluculated value\n\tcalc_url = 'https://www.ryotosaito.com/shielld/calc_signature.php'\n\toauth_token_secret = users[user_name]['oauth_token_secret'] if user_name in users else ''\n\tparams = {'base_string' : base_string, 'oauth_token_secret' : oauth_token_secret}\n\trequest = requests.post(calc_url, params);\n\treturn request.text",
"def _get_signature(self, timestamp: int or str):\n # Key is fixed.\n ha = hmac.new(key=b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)\n grant_type = self.login_data['grant_type']\n client_id = self.login_data['client_id']\n source = self.login_data['source']\n ha.update(bytes((grant_type + client_id + source + str(timestamp)), 'utf-8'))\n return ha.hexdigest()",
"def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)",
"def gen_sig(key, data):\n signature = hmac.new(key.encode('utf-8'), data.encode('utf-8'), hashlib.sha1)\n\n sig = signature.digest()\n # base64 encode\n b64 = base64.b64encode( sig)\n # url encode\n return b64",
"def calculate_signature(self, request_date, host, endpoint, params, headers, method, payload=\"\", time=time):\n\n algorithm = params['SignatureMethod']\n\n canonical_request = self.get_canonical_string(request_date, host, endpoint, params, headers, method, payload)\n string_to_sign = self.get_string_to_sign(algorithm, request_date, canonical_request)\n\n request_date_simple = request_date[:8]\n\n digestmod = hashlib.sha256\n kdate = hmac.new(('AWS4' + self._secret_key).encode(), request_date_simple.encode(), digestmod).digest()\n kregion = hmac.new(kdate, self._region.encode(), digestmod).digest()\n kservice = hmac.new(kregion, self._service.encode(), digestmod).digest()\n ksigning = hmac.new(kservice, self._request_scope.encode(), digestmod).digest()\n\n signature = hmac.new(ksigning, string_to_sign.encode(), digestmod).digest()\n\n return binascii.hexlify(signature)",
"def _sign_request(secret, method, url, timestamp, content_hash=None):\n message = f'{timestamp}{url}{method}{content_hash}'\n\n return hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha512).hexdigest()",
"def build_key_signature(request_meta, hash_result=False):\n key_sig = list()\n\n # Build the key signature -- These keys must exist\n for key_name in REQUEST_META_BASE:\n key = getattr(request_meta, key_name)\n if key:\n key_sig.append(key_name + HASH_KEY_DELIMETER + key)\n else:\n logging.error(__name__ + ' :: Request must include %s. '\n 'Cannot set data %s.' %\n (key_name, str(request_meta)))\n return ''\n # These keys may optionally exist\n for key_name in REQUEST_META_QUERY_STR:\n if hasattr(request_meta, key_name):\n key = getattr(request_meta, key_name)\n if key:\n key_sig.append(key_name + HASH_KEY_DELIMETER + str(key))\n\n if hash_result:\n return sha1(str(key_sig).encode('utf-8')).hexdigest()\n else:\n return key_sig",
"def _get_signature(value):\n mySha = hashlib.sha256()\n mySha.update(value)\n # print mySha.hexdigest()\n return mySha.hexdigest()",
"def get_mac_signature(request, secret, params=None):\n if params is None:\n params = parse_authz_header(request, {})\n sigstr = get_normalized_request_string(request, params)\n return b64encode(hmac.new(secret, sigstr, sha1).digest())",
"def get_signature_xml() -> str:\n return render_to_string(\"saml/xml/signature.xml\", {})",
"def sign_hmac_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return hmac_sha1_signature(\n base_string, client.client_secret, client.token_secret)",
"def _generate_signature(self, key, msg):\n key = to_bytes(key)\n msg = to_bytes(msg)\n\n hash_obj = hmac.new(key, msg=msg, digestmod=hashlib.sha256)\n digest = hash_obj.digest() # abstract\n\n signature = base64.b64encode(digest) # Signature\n return to_unicode(signature)",
"def sign_request(request, key, iv):\n hashed_request = SHA1.new(bytes(json.dumps(request), \"ASCII\"))\n\n cipher = AES.new(b16decode(key), AES.MODE_CBC, b16decode(iv))\n ciphertext = cipher.encrypt(pad(hashed_request.digest(), 16))\n\n return b64encode(ciphertext)",
"def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))",
"def sign_rsa_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return rsa_sha1_signature(base_string, client.rsa_key)"
] |
[
"0.81524706",
"0.7412255",
"0.7397509",
"0.71844584",
"0.71188706",
"0.7089657",
"0.6939644",
"0.6879352",
"0.68650514",
"0.68331236",
"0.68296903",
"0.67501324",
"0.67149013",
"0.6704014",
"0.6614577",
"0.65598744",
"0.65263504",
"0.6508957",
"0.6507487",
"0.6457547",
"0.64513576",
"0.6449186",
"0.6440109",
"0.64216846",
"0.64200264",
"0.63586915",
"0.63532794",
"0.6321912",
"0.62424684",
"0.62320715"
] |
0.8285932
|
0
|
Generate signature via PLAINTEXT method, per `Section 3.4.4`_. The "PLAINTEXT" method does not employ a signature algorithm. It MUST be used with a transport-layer mechanism such as TLS or SSL (or sent over a secure channel with equivalent protections). It does not utilize the signature base string or the "oauth_timestamp" and "oauth_nonce" parameters.
|
def plaintext_signature(client_secret, token_secret):
# The "oauth_signature" protocol parameter is set to the concatenated
# value of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
signature = escape(client_secret or '')
# 2. An "&" character (ASCII code 38), which MUST be included even
# when either secret is empty.
signature += '&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
signature += escape(token_secret or '')
return signature
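
# A minimal, self-contained usage sketch of the PLAINTEXT construction above.
# The `escape` helper is reimplemented locally with `urllib.parse.quote` as a
# stand-in for the module's RFC 5849 Section 3.6 encoder; the secrets are
# illustrative placeholders only.
from urllib.parse import quote


def escape(s):
    # Stand-in percent-encoder: only RFC 3986 unreserved characters
    # (A-Z, a-z, 0-9, '-', '.', '_', '~') are left unencoded.
    return quote(s, safe='~')


client_secret = 'kd94hf93k423kf44'   # placeholder client shared-secret
token_secret = 'pfkkdhi9sl3r4s00'    # placeholder token shared-secret

# PLAINTEXT signature: encoded client secret, '&', encoded token secret.
signature = escape(client_secret) + '&' + escape(token_secret)
print(signature)  # kd94hf93k423kf44&pfkkdhi9sl3r4s00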
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GenSampleSignature(text):\r\n demo_keypair = ('RSA.mVgY8RN6URBTstndvmUUPb4UZTdwvwmddSKE5z_jvKUEK6yk1'\r\n 'u3rrC9yN8k6FilGj9K0eeUPe2hf4Pj-5CmHww=='\r\n '.AQAB'\r\n '.Lgy_yL3hsLBngkFdDw1Jy9TmSRMiH6yihYetQ8jy-jZXdsZXd8V5'\r\n 'ub3kuBHHk4M39i3TduIkcrjcsiWQb77D8Q==')\r\n\r\n signer = SignatureAlgRsaSha256(demo_keypair)\r\n return signer.Sign(text)",
"def create_signature(auth_scheme, api_key_secret, signing_data, timestamp, nonce):\n if auth_scheme == 'VERACODE-HMAC-SHA-256':\n signature = create_hmac_sha_256_signature(api_key_secret, signing_data, timestamp, nonce)\n else:\n raise UnsupportedAuthSchemeException('Auth scheme {auth_scheme} not supported'.format(auth_scheme=auth_scheme))\n return signature",
"def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)",
"def _get_signature(self, timestamp: int or str):\n # Key is fixed.\n ha = hmac.new(key=b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)\n grant_type = self.login_data['grant_type']\n client_id = self.login_data['client_id']\n source = self.login_data['source']\n ha.update(bytes((grant_type + client_id + source + str(timestamp)), 'utf-8'))\n return ha.hexdigest()",
"def sign_plaintext(client, request):\n return plaintext_signature(client.client_secret, client.token_secret)",
"def _buildSignatureString(self):\n self.params=self.kargs\n \n try: method_details=self.MMAP[self.method]\n except: \n raise RuntimeError(\"unsupported method\")\n \n api_key_required=method_details[\"api_key_required\"]\n if api_key_required:\n self.params.update({\"api_key\": self.API_KEY, \"method\":self.method})\n \n signature_required=method_details[\"signature_required\"]\n if not signature_required:\n self.signature_string=\"\"\n return\n \n sorted_keys=self.params.keys().sort()\n \n str=\"\"\n try:\n for key in sorted_keys:\n if key not in self.PARAMS_TO_EXCLUDE_FROM_SIGNATURE:\n \n ## assume the parameter's value is valid\n try: \n if key not in self.PARAMS_TO_EXCLUDE_FROM_UTF8_ENCODING:\n value=self.params[key].encode(\"utf-8\")\n else:\n value=self.params[key]\n except: value=self.params[key]\n str=\"%s%s\" % (key, value)\n except:\n pass\n \n str += self.API_SECRET\n m=hashlib.md5()\n m.update(str)\n self.signature_string=m.hexdigest()\n \n self.kargs.update({\"api_sig\": self.signature_string})",
"def create_signature(self, string_to_sign: str) -> str:\n begin_signature = hmac.new(key=base64.b64decode(self.secret),\n msg=string_to_sign.encode(),\n digestmod=hashlib.sha1)\n end_signature = begin_signature.digest()\n final_signature = base64.b64encode(end_signature).decode()\n return final_signature",
"def sign(self, body, external_aad, private_key):",
"def _sign_request(secret, method, url, timestamp, content_hash=None):\n message = f'{timestamp}{url}{method}{content_hash}'\n\n return hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha512).hexdigest()",
"def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id",
"def signing_base(self, request, consumer, token):\r\n sig = (\r\n escape(request.method),\r\n escape(OAuthHook.get_normalized_url(request.url)),\r\n escape(OAuthHook.get_normalized_parameters(request)),\r\n )\r\n\r\n key = '%s&' % escape(consumer.secret)\r\n if token is not None:\r\n key += escape(token.secret)\r\n raw = '&'.join(sig)\r\n return key, raw",
"def generate_cybersource_sa_signature(payload):\n # This is documented in certain CyberSource sample applications:\n # http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_SOP/html/wwhelp/wwhimpl/js/html/wwhelp.htm#href=creating_profile.05.6.html\n keys = payload[\"signed_field_names\"].split(\",\")\n message = \",\".join(f\"{key}={payload[key]}\" for key in keys)\n\n digest = hmac.new(\n settings.CYBERSOURCE_SECURITY_KEY.encode(\"utf-8\"),\n msg=message.encode(\"utf-8\"),\n digestmod=hashlib.sha256,\n ).digest()\n\n return b64encode(digest).decode(\"utf-8\")",
"def _GenerateSignatureBaseString(self, method, request_url_base, params):\n return self._EscapeAndJoin([method, request_url_base,\n self._FormatUrlParams(params)])",
"def build_signature(method, url, oauth_params, params={}):\n\t# Copy params to prevent modification from original params\n\tall_params = copy.deepcopy(oauth_params)\n\t# Combine OAuth parameters and original parameters\n\tall_params.update(params)\n\t# Sort, stringify, and encode all parameters\n\tkeys = sorted(all_params.keys())\n\tencoded_params = ''\n\tfor key in keys:\n\t\tencoded_params += key+'='+percent_encode(str(all_params[key]))+'&'\n\tencoded_params = encoded_params[:-1]\n\tbase_string = method.upper()+'&'+percent_encode(url)+'&'+percent_encode(encoded_params)\n\t# Request crypt calculation to the server and return caluculated value\n\tcalc_url = 'https://www.ryotosaito.com/shielld/calc_signature.php'\n\toauth_token_secret = users[user_name]['oauth_token_secret'] if user_name in users else ''\n\tparams = {'base_string' : base_string, 'oauth_token_secret' : oauth_token_secret}\n\trequest = requests.post(calc_url, params);\n\treturn request.text",
"def _oauth_signature(consumer_token, method, url, parameters={}, token=None):\n parts = urlparse.urlparse(url)\n scheme, netloc, path = parts[:3]\n normalized_url = scheme.lower() + \"://\" + netloc.lower() + path\n\n base_elems = []\n base_elems.append(method.upper())\n base_elems.append(normalized_url)\n base_elems.append(\"&\".join(\"%s=%s\" % (k, _oauth_escape(str(v)))\n for k, v in sorted(parameters.items())))\n base_string = \"&\".join(_oauth_escape(e) for e in base_elems)\n\n key_elems = [consumer_token[\"secret\"]]\n key_elems.append(token[\"secret\"] if token else \"\")\n key = \"&\".join(key_elems)\n\n hash = hmac.new(key, base_string, hashlib.sha1)\n return binascii.b2a_base64(hash.digest())[:-1]",
"def __sign(self, request_type, endpoint, content=None):\n\t\trequest = request_type + \"\\n\" + endpoint + \"\\n\" + content\n\t\tmac = hmac.new(\n\t\t\tself.api_secret.encode('utf-8'),\n\t\t\trequest.encode('utf-8'),\n\t\t\tdigestmod=hashlib.sha256\n\t\t).hexdigest()\n\t\treturn base64.b64encode(mac.encode('utf-8'))",
"def PCTSignatures_create(initSampleCount=None, initSeedCount=None, pointDistribution=None): # real signature unknown; restored from __doc__\n pass",
"def _sign(self, oauth_payload, request):\n\t\t# merge params\n\t\t# use oauth_payload to update request params might avoid \n\t\t# some oauth params's accidental overriding\n\t\tpayload = dict( request.params )\n\t\tpayload.update( oauth_payload )\n\n\t\t# here I assume that all keys contain only 'a-zA-Z_.-'\n\t\t# thus there is no necessity to percent-encode them\n\t\t# will now sort them according to their original value\n\n\t\tkeylist = sorted( payload.keys() )\n\t\trawlist = []\n\t\tfor k in keylist:\n\t\t\tencoded_value = percent_encode( payload[k] )\n\t\t\trawlist.append( \"%s=%s\" % (k, encoded_value) )\n\n\t\t# craft base string\n\t\tbase_string = request.method.upper()\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode(request.base_url)\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode( '&'.join( rawlist ) )\n\n\t\tself._print( \"Base string:\\n\" + base_string )\n\t\t# craft signing key\n\t\tif self.has_user():\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.a_secret) )\n\t\telse:\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.token_secret) )\n\n\t\t# sign base_string\n\t\thashed = hmac.new(signing_key, base_string, hashlib.sha1)\n\t\tsignature = binascii.b2a_base64(hashed.digest())[:-1]\n\t\t\n\t\t# append signature field\n\t\toauth_payload[\"oauth_signature\"] = signature\n\n\t\t# prepare relevant oauth values\n\t\toauth_entry = []\n\t\tfor k in oauth_payload.keys():\n\t\t\tencoded_value = percent_encode( oauth_payload[k] )\n\t\t\toauth_entry.append( '%s=\"%s\"' % (k, encoded_value) )\n\n\t\toauth_str = 'OAuth ' + ','.join(oauth_entry)\n\t\tself._print( \"OAuth header:\\n\" + oauth_str )\n\t\t# field crafted\n\t\treturn { \"Authorization\" : oauth_str }",
"def _gen_api_sig(self, endpoint: str) -> str:\n return hmac.new(self._api_secret.encode(),\n endpoint.encode(),\n hashlib.sha512).hexdigest()",
"def get_signed(self, **payload):\n param = ''\n for k in payload:\n param += '&' + k + '=' + str(payload[k])\n param = param.lstrip('&')\n signature = hmac.new(self.secret, param, digestmod=hashlib.sha256).hexdigest()\n\n return signature",
"def create_hmac_sha_256_signature(api_key_secret, signing_data, timestamp, nonce):\n key_nonce = \\\n hmac.new(codecs.decode(api_key_secret, 'hex_codec'), codecs.decode(nonce, 'hex_codec'), sha256).digest()\n key_date = hmac.new(key_nonce, str(timestamp).encode(), sha256).digest()\n signature_key = hmac.new(key_date, u'vcode_request_version_1'.encode(), sha256).digest()\n return hmac.new(signature_key, signing_data.encode(), sha256).hexdigest()",
"def sign(self, request, consumer, token):\r\n key, raw = self.signing_base(request, consumer, token)\r\n hashed = hmac.new(key, raw, sha)\r\n # Calculate the digest base 64.\r\n return binascii.b2a_base64(hashed.digest())[:-1]",
"def generate(cls, user, service):\n string = \"{}:{}\".format(user, service)\n key = cls.get_key()\n signature = base64.b64encode(hmac.new(key, msg=string, digestmod=hashlib.sha256).digest())\n return \"{}:{}:{}\".format(user, service, signature)",
"def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):\r\n message = signing_format_message(method, headers_dict, body_dict)\r\n\r\n # hmac needs a byte string for it's starting key, can't be unicode.\r\n hashed = hmac.new(secret_key.encode('utf-8'), message, sha256)\r\n signature = binascii.b2a_base64(hashed.digest()).rstrip('\\n')\r\n authorization_header = \"SSI {}:{}\".format(access_key, signature)\r\n\r\n message += '\\n'\r\n return message, signature, authorization_header",
"def _generate_signature(self, key, msg):\n key = to_bytes(key)\n msg = to_bytes(msg)\n\n hash_obj = hmac.new(key, msg=msg, digestmod=hashlib.sha256)\n digest = hash_obj.digest() # abstract\n\n signature = base64.b64encode(digest) # Signature\n return to_unicode(signature)",
"def sign(self, params: Dict[str, Any]) -> str:\n\n assert self.secret is not None, \"A client secret is required to sign requests.\"\n\n query = urlencode(params)\n signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)\n\n return signature.hexdigest()",
"def get_signature_xml() -> str:\n return render_to_string(\"saml/xml/signature.xml\", {})",
"def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"",
"def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature",
"def create_id_nonce_signature(\n cls, *, signature_inputs: TSignatureInputs, private_key: bytes,\n ) -> bytes:\n ..."
] |
[
"0.6079742",
"0.5759598",
"0.5742009",
"0.5716066",
"0.5678872",
"0.5528024",
"0.5516478",
"0.5481387",
"0.5392312",
"0.5375917",
"0.53083646",
"0.528393",
"0.5281056",
"0.52782965",
"0.5267875",
"0.5240674",
"0.52330804",
"0.520887",
"0.52030885",
"0.5187856",
"0.51490825",
"0.5148232",
"0.5132759",
"0.5111562",
"0.5107712",
"0.5099871",
"0.5076937",
"0.50236946",
"0.501798",
"0.5016861"
] |
0.5771276
|
1
|
Sign a HMAC-SHA1 signature.
|
def sign_hmac_sha1(client, request):
base_string = generate_signature_base_string(request)
return hmac_sha1_signature(
base_string, client.client_secret, client.token_secret)
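
# For context, a self-contained sketch of what the `hmac_sha1_signature`
# helper is assumed to do with the base string: build the '&'-joined key from
# the encoded secrets, compute HMAC-SHA1, and base64-encode the digest. The
# base string and secrets below are placeholders.
import base64
import hashlib
import hmac
from urllib.parse import quote

base_string = 'POST&https%3A%2F%2Fapi.example.com%2Frequest&a%3D1'  # placeholder
client_secret = 'client-secret'                                     # placeholder
token_secret = 'token-secret'                                       # placeholder

# Signing key: encoded client secret, '&', encoded token secret.
key = quote(client_secret, safe='~') + '&' + quote(token_secret, safe='~')

digest = hmac.new(key.encode('ascii'), base_string.encode('ascii'), hashlib.sha1).digest()
signature = base64.b64encode(digest).decode('ascii')
print(signature)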
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sign_rsa_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return rsa_sha1_signature(base_string, client.rsa_key)",
"def hashAndSign(self, bytes):\r\n hashBytes = SHA1(bytearray(bytes))\r\n prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)\r\n sigBytes = self.sign(prefixedHashBytes)\r\n return sigBytes",
"def sign(key, data):\n h = hmac.new(key, data.encode('utf8'), sha1)\n return base64.b64encode(h.digest())",
"def hmac_sha1_signature(base_string, client_secret, token_secret):\n\n # The HMAC-SHA1 function variables are used in following way:\n\n # text is set to the value of the signature base string from\n # `Section 3.4.1.1`_.\n #\n # .. _`Section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1\n text = base_string\n\n # key is set to the concatenated values of:\n # 1. The client shared-secret, after being encoded (`Section 3.6`_).\n #\n # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6\n key = escape(client_secret or '')\n\n # 2. An \"&\" character (ASCII code 38), which MUST be included\n # even when either secret is empty.\n key += '&'\n\n # 3. The token shared-secret, after being encoded (`Section 3.6`_).\n #\n # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6\n key += escape(token_secret or '')\n\n signature = hmac.new(to_bytes(key), to_bytes(text), hashlib.sha1)\n\n # digest is used to set the value of the \"oauth_signature\" protocol\n # parameter, after the result octet string is base64-encoded\n # per `RFC2045, Section 6.8`.\n #\n # .. _`RFC2045, Section 6.8`: https://tools.ietf.org/html/rfc2045#section-6.8\n sig = binascii.b2a_base64(signature.digest())[:-1]\n return to_unicode(sig)",
"def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()",
"def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)",
"def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()",
"def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)",
"def sign(self, params: Dict[str, Any]) -> str:\n\n assert self.secret is not None, \"A client secret is required to sign requests.\"\n\n query = urlencode(params)\n signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)\n\n return signature.hexdigest()",
"def sign(self, encoded):\n signature = self._hmac.copy()\n signature.update(encoded)\n return signature.hexdigest().encode('utf-8')",
"def verify_hmac_sha1(request):\n base_string = generate_signature_base_string(request)\n sig = hmac_sha1_signature(\n base_string, request.client_secret, request.token_secret)\n return hmac.compare_digest(sig, request.signature)",
"def cookie_signature(self, *parts):\n sha1 = hmac.new(self._secret, digestmod=hashlib.sha1)\n for part in parts: \n sha1.update(part)\n return sha1.hexdigest()",
"def SHA1(self) -> _n_0_t_3[_n_0_t_9]:",
"def sign_transaction(self):\n private_key=RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer=PKCS1_v1_5.new(private_key)\n h=SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')",
"def Sign(self, bytes_to_sign, logf=None):\r\n # Implements PKCS1-v1_5 w/SHA256 over the bytes, and returns\r\n # the result as a base64url encoded bignum.\r\n\r\n self._Log(logf, 'bytes_to_sign = [%s]' % bytes_to_sign.encode('hex'))\r\n\r\n self._Log(logf, 'keypair size : %s' % self.keypair.size())\r\n\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(bytes_to_sign, self.keypair.size(), logf)\r\n # TODO(jpanzer): Check whether we need to use max keysize above\r\n # or just keypair.size\r\n\r\n self._Log(logf, 'emsa_msg = [%s]' % emsa_msg.encode('hex'))\r\n\r\n # Compute the signature:\r\n signature_long = self.keypair.sign(emsa_msg, None)[0]\r\n\r\n # Encode the signature as armored text:\r\n signature_bytes = number.long_to_bytes(signature_long)\r\n\r\n self._Log(logf, 'signature_bytes = [%s]' % signature_bytes.encode('hex'))\r\n\r\n return base64.urlsafe_b64encode(signature_bytes).encode('utf-8')",
"def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')",
"def sign_hmac(self, sessionID):\r\n \r\n self.new_hmac = hmac.new(bytes(self.passphrase), self.encrypted_iv, hashlib.sha224)\r\n self.new_hmac.update(self.encrypted_nodeid)\r\n self.new_hmac.update(self.encrypted_data)\r\n self.new_hmac.update(sessionID)\r\n \r\n return self.new_hmac",
"def sha1(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha1\")",
"def get_signed(self, sig_str):\n sig_str = base64.b64encode(sig_str)\n signature = base64.b64encode(hmac.new(self.secret, sig_str, digestmod=hashlib.sha1).digest())\n return signature",
"def hex_sha1_of_bytes(data: bytes) -> Sha1HexDigest:\n return Sha1HexDigest(hashlib.sha1(data).hexdigest())",
"def sha1(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)\n d.update(data)\n return d.digest()",
"def verify_signature(request):\n\n secret = settings.GITHUB_WEBHOOK_SECRET\n header = request.headers.get(\"X-Hub-Signature\")\n\n if header is None:\n abort(403)\n\n if header[:5] != \"sha1=\":\n abort(403)\n\n signature = header[5:]\n\n mac = hmac.new(secret, msg=request.data, digestmod=\"sha1\")\n if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):\n abort(403)",
"def gen_sig(key, data):\n signature = hmac.new(key.encode('utf-8'), data.encode('utf-8'), hashlib.sha1)\n\n sig = signature.digest()\n # base64 encode\n b64 = base64.b64encode( sig)\n # url encode\n return b64",
"def create_signature(self, string_to_sign: str) -> str:\n begin_signature = hmac.new(key=base64.b64decode(self.secret),\n msg=string_to_sign.encode(),\n digestmod=hashlib.sha1)\n end_signature = begin_signature.digest()\n final_signature = base64.b64encode(end_signature).decode()\n return final_signature",
"def cookie_signature(seed, *parts):\n sha1 = hmac.new(seed, digestmod=hashlib.sha1)\n for part in parts:\n if part:\n sha1.update(part)\n return sha1.hexdigest()",
"def compute_signature(msg):\n hashkey = memcache.Client().get('CURL_TEST_SERVER_HASHKEY')\n h = hmac.new(hashkey, msg, hashlib.sha1)\n signature = urllib.quote(base64.b64encode(h.digest()))\n return signature",
"def sign(self, request, consumer, token):\r\n key, raw = self.signing_base(request, consumer, token)\r\n hashed = hmac.new(key, raw, sha)\r\n # Calculate the digest base 64.\r\n return binascii.b2a_base64(hashed.digest())[:-1]",
"def get_signed(self, **payload):\n param = ''\n for k in payload:\n param += '&' + k + '=' + str(payload[k])\n param = param.lstrip('&')\n signature = hmac.new(self.secret, param, digestmod=hashlib.sha256).hexdigest()\n\n return signature",
"def sign_transaction():\n data = request.get_json()\n\n try:\n tx = Transaction.from_dict(data)\n except TypeError:\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n signature = tx.sign(node.wallet.private_key_rsa)\n response = dict(signature=signature)\n return jsonify(response), 200",
"def create_hmac_sha_256_signature(api_key_secret, signing_data, timestamp, nonce):\n key_nonce = \\\n hmac.new(codecs.decode(api_key_secret, 'hex_codec'), codecs.decode(nonce, 'hex_codec'), sha256).digest()\n key_date = hmac.new(key_nonce, str(timestamp).encode(), sha256).digest()\n signature_key = hmac.new(key_date, u'vcode_request_version_1'.encode(), sha256).digest()\n return hmac.new(signature_key, signing_data.encode(), sha256).hexdigest()"
] |
[
"0.7322658",
"0.69608504",
"0.6795846",
"0.6739002",
"0.66943777",
"0.66635305",
"0.6322254",
"0.6264413",
"0.62420106",
"0.6241299",
"0.6170932",
"0.6157063",
"0.60972005",
"0.6070152",
"0.6054219",
"0.6030812",
"0.602599",
"0.59852517",
"0.5922484",
"0.5917426",
"0.59022945",
"0.5890591",
"0.5882104",
"0.5857109",
"0.5830347",
"0.5826169",
"0.58252895",
"0.5823457",
"0.581769",
"0.5815236"
] |
0.8078111
|
0
|
Sign a RSASSA-PKCS #1 v1.5 base64 encoded signature.
|
def sign_rsa_sha1(client, request):
base_string = generate_signature_base_string(request)
return rsa_sha1_signature(base_string, client.rsa_key)
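
# As a rough equivalent of the `rsa_sha1_signature` helper assumed above, this
# sketch signs a placeholder base string with RSASSA-PKCS #1 v1.5 over SHA-1
# using the `cryptography` package and base64-encodes the result; a freshly
# generated key stands in for `client.rsa_key`.
import base64

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

# Placeholder key and base string; real code would load the client's RSA key
# and build the base string from the request.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
base_string = b'POST&https%3A%2F%2Fapi.example.com%2Frequest&a%3D1'

raw_sig = private_key.sign(base_string, padding.PKCS1v15(), hashes.SHA1())
signature = base64.b64encode(raw_sig).decode('ascii')
print(signature)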
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)",
"def base64sign(plaintext, private_key):\n shahash = SHA256.new(plaintext.encode('utf8'))\n signer = PKCS1_v1_5.new(private_key)\n signature_bytes = signer.sign(shahash)\n return base64.b64encode(signature_bytes)",
"def sign(key, data):\n h = hmac.new(key, data.encode('utf8'), sha1)\n return base64.b64encode(h.digest())",
"def Sign(self, data):\n return self.rsa_key.sign(data, padding.PKCS1v15(), utils.Prehashed(hashes.SHA1()))",
"def sign(self, data):\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return b64encode(signature)",
"def sign_with_rsa(private_key, data):\n if isinstance(data, six.text_type):\n encoded_data = data.encode('utf8')\n else:\n encoded_data = data\n\n signed_data = OpenSSL.crypto.sign(private_key, encoded_data, \"sha1\")\n return base64.b64encode(signed_data).decode('ascii')",
"def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')",
"def Sign(self, bytes_to_sign, logf=None):\r\n # Implements PKCS1-v1_5 w/SHA256 over the bytes, and returns\r\n # the result as a base64url encoded bignum.\r\n\r\n self._Log(logf, 'bytes_to_sign = [%s]' % bytes_to_sign.encode('hex'))\r\n\r\n self._Log(logf, 'keypair size : %s' % self.keypair.size())\r\n\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(bytes_to_sign, self.keypair.size(), logf)\r\n # TODO(jpanzer): Check whether we need to use max keysize above\r\n # or just keypair.size\r\n\r\n self._Log(logf, 'emsa_msg = [%s]' % emsa_msg.encode('hex'))\r\n\r\n # Compute the signature:\r\n signature_long = self.keypair.sign(emsa_msg, None)[0]\r\n\r\n # Encode the signature as armored text:\r\n signature_bytes = number.long_to_bytes(signature_long)\r\n\r\n self._Log(logf, 'signature_bytes = [%s]' % signature_bytes.encode('hex'))\r\n\r\n return base64.urlsafe_b64encode(signature_bytes).encode('utf-8')",
"def sign(self, msg, key):\n\n if not isinstance(key, ec.EllipticCurvePrivateKey):\n raise TypeError(\"The private key must be an instance of \" \"ec.EllipticCurvePrivateKey\")\n\n self._cross_check(key.public_key())\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n asn1sig = key.sign(msg, ec.ECDSA(self.hash_algorithm()))\n # Cryptography returns ASN.1-encoded signature data; decode as JWS\n # uses raw signatures (r||s)\n (r, s) = decode_dss_signature(asn1sig)\n return int.to_bytes(r, num_bytes, \"big\") + int.to_bytes(s, num_bytes, \"big\")",
"def sign(self, data):\n from base64 import urlsafe_b64encode\n\n if self.sign_private == \"\":\n raise ValueError(\"Error signing: No private signing key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return urlsafe_b64encode(signature)",
"def gen_sig(key, data):\n signature = hmac.new(key.encode('utf-8'), data.encode('utf-8'), hashlib.sha1)\n\n sig = signature.digest()\n # base64 encode\n b64 = base64.b64encode( sig)\n # url encode\n return b64",
"def sign(self, data: bytes) -> bytes:\n return self._signing_key.sign(data).signature",
"def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)",
"def _rsa_sign(blob, private_key_pem):\n # Lazy import crypto. It is not available in unit tests outside of sandbox.\n from Crypto.Hash import SHA256\n from Crypto.PublicKey import RSA\n from Crypto.Signature import PKCS1_v1_5\n pkey = RSA.importKey(private_key_pem)\n return PKCS1_v1_5.new(pkey).sign(SHA256.new(blob))",
"def _rsassa_pkcs1_v1_5_sign(self, M, h):\n\n # 1) EMSA-PKCS1-v1_5 encoding\n k = self._modulusLen / 8\n EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)\n if EM is None:\n warning(\"Key._rsassa_pkcs1_v1_5_sign(): unable to encode\")\n return None\n\n # 2) RSA signature\n m = pkcs_os2ip(EM) # 2.a)\n s = self._rsasp1(m) # 2.b)\n S = pkcs_i2osp(s, k) # 2.c)\n\n return S # 3)",
"def sign(self, bytes):\r\n if not self.hasPrivateKey():\r\n raise AssertionError()\r\n paddedBytes = self._addPKCS1Padding(bytes, 1)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPrivateKeyOp(m)\r\n sigBytes = numberToByteArray(c, numBytes(self.n))\r\n return sigBytes",
"def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")",
"def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id",
"def sign(self, message):\n\n # if not already a byte string turn it to making sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature",
"def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()",
"def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature",
"def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig",
"def rsa_sign(message, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.sign)",
"def sign(self, digest):\n sig = self.private_key_obj.sign(digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))\n sig_rs = utils.decode_dss_signature(sig)\n sig_r = int.to_bytes(sig_rs[0], 32, \"big\")\n sig_s = int.to_bytes(sig_rs[1], 32, \"big\")\n return bytes(bytearray(sig_r)+bytearray(sig_s))",
"def get_signed(self, sig_str):\n sig_str = base64.b64encode(sig_str)\n signature = base64.b64encode(hmac.new(self.secret, sig_str, digestmod=hashlib.sha1).digest())\n return signature",
"def sign(priv_key: rsa.RSAPrivateKey, msg: bytes) -> Signature:\n return priv_key.sign(msg, PADDING, HASH)",
"def sign(self, request, consumer, token):\r\n key, raw = self.signing_base(request, consumer, token)\r\n hashed = hmac.new(key, raw, sha)\r\n # Calculate the digest base 64.\r\n return binascii.b2a_base64(hashed.digest())[:-1]",
"def sign_hmac_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return hmac_sha1_signature(\n base_string, client.client_secret, client.token_secret)",
"def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n return util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])",
"def sign(self, encoded):\n signature = self._hmac.copy()\n signature.update(encoded)\n return signature.hexdigest().encode('utf-8')"
] |
[
"0.7349005",
"0.68615925",
"0.68418187",
"0.6789189",
"0.6698838",
"0.6683597",
"0.66149825",
"0.6538047",
"0.6537051",
"0.6440535",
"0.64177877",
"0.63638127",
"0.6333892",
"0.6318501",
"0.6309397",
"0.6236319",
"0.6219438",
"0.62098646",
"0.6176856",
"0.6170527",
"0.6128557",
"0.6107894",
"0.61029845",
"0.6036811",
"0.6008623",
"0.6006226",
"0.5994361",
"0.59669346",
"0.5960011",
"0.59585905"
] |
0.7381339
|
0
|