Dataset columns:
  query            string (9 to 9.05k characters)
  document         string (10 to 222k characters)
  metadata         dict
  negatives        sequence (length 30)
  negative_scores  sequence (length 30)
  document_score   string (4 to 10 characters)
  document_rank    string (2 distinct values)
Justified (name, value, units, doc) strings for active parameters.
def param_strs(self):
    name_len = max(len(p.name) for p in self)
    value_len = max(len(p.value_str) for p in self.params.values())
    units_len = max(len(p.units) for p in self.params.values())
    return [(p.name.ljust(name_len), p.value_str.ljust(value_len),
             p.units.ljust(units_len), p.__doc__)
            for p in self.params.values() if p]
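For readability, a small standalone sketch of the justification pattern the snippet above implements; the parameter tuples below are hypothetical, and the real method pulls name, value_str, and units from its Params container.

params = [("radius", "3.5", "px", "particle radius"),
          ("cs", "2.0", "mm", "spherical aberration")]
name_len = max(len(n) for n, _, _, _ in params)
value_len = max(len(v) for _, v, _, _ in params)
units_len = max(len(u) for _, _, u, _ in params)
for n, v, u, doc in params:
    # pad each column to the longest entry so the output lines up
    print("%s = %s %s (%s)" % (n.ljust(name_len), v.ljust(value_len),
                               u.ljust(units_len), doc))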
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n num_active = len([p for p in self if p])\n summary = \"%s has %d parameters with %d active (non-default)\" % \\\n (self.__class__.__name__, len(self), num_active)\n if num_active == 0:\n return summary\n return summary + ':\\n' + '\\n'.join(('%s = %s %s (%s)' % ps)\n for ps in self.param_strs())", "def __str__(self):\r\n res = [self.Name + ' parameters:']\r\n for t in self._tracked_properties:\r\n res.append(t + ':' + str(getattr(self, t)))\r\n for k, v in sorted(self.Params.items()):\r\n res.append(str(k) + ':' + str(v))\r\n return '\\n'.join(res)", "def display_parameters(self):\n l = []\n for param in self.parameters.all():\n if len(param.value) > 16:\n l.append(u\"{}={}...\".format(param.name, param.value[:16]))\n else:\n l.append(u\"{}={}\".format(param.name, param.value))\n return \"; \".join(l)", "def __parameters_string(self):\n if self._parameters == list():\n return ''\n\n docstring = \"\"\"\n\nParameters:\n\"\"\"\n \n # Compute maximum length of any parameter name\n maxlen = 0\n for param in self._parameters:\n maxlen = max(maxlen, len(param[0]))\n\n # Build documentation for parameters\n for (on_param, param) in enumerate(self._parameters):\n if on_param > 0:\n docstring += '\\n'\n\n docstring += ' ' + param[0].ljust(maxlen + 2)\n doc = wrap(param[1], columns - maxlen - 4)\n padding = str('')\n for line in doc.split('\\n'):\n docstring += padding + line + '\\n'\n padding = str('').ljust(maxlen + 4)\n \n # Pull off the final '\\n'\n return docstring[0:len(docstring)-1]", "def __str__(self):\n return \"{}: {} params, wires {}\".format(self.name, len(self.params), self.wires)", "def format_freeform_params(self):\n return self.format_param_pairs(self.get_freeform_reg_params())", "def params_desc(self):\n return \"{}/{}/{}/{}\".format(\n self.learning_rate, self.movement, self.milestones, self.gamma\n )", "def _parameter_summary(self, parameters, parameters_to_show=4):\n params = parameters\n if len(parameters) > parameters_to_show:\n params = parameters[:2] + [\"...\"] + parameters[-2:]\n return \", \".join(params)", "def _format_parameter_output(self, parameters: dict) -> str:\n \n output = ''\n for key, value in parameters.items():\n output = output + '\\t\\t' + str(key) + ': ' + str(value) + '\\n'\n \n return output", "def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def _format_parameterArray(self):\n return \"{%s; %s}\" % tuple(', '.join(str(x) for x in l)\n for l in self.parameterArray())", "def _params_formatter(field, description):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(rst.escape(field['name']))\n tail = description\n return heads, tail", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ 
'\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def _params_formatter(field):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(field['name'])\n tail = field.get('description', '')\n return heads, tail", "def _write_params(self, size):\n msg = []\n if self.params:\n msg = ['$PARAMS\\n']\n for (key, param) in sorted(self.params.iteritems()):\n msg.append(param.print_card(size))\n return ''.join(msg)", "def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))", "def __str__(self):\n return 'Tensor product {}: {} params, wires {}'.format([i.name for i in self.obs], len(self.params), self.wires)", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def __make_description(self, param_name):\n value = self._params.get_value(param_name)\n return \"%s (Currently %s)\" % (param_name, str(value))", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def format_params(self, params):\n return params", "def pretty(self):\n def arg_to_str(name, value):\n if value is True:\n return '+' + name\n elif value is False:\n return '~' + name\n elif isinstance(value, Var):\n if value.name == name:\n return '?' 
+ value.name\n return name + \"=\" + value.name\n else:\n return name + \"=\" + repr(value)\n\n if len(self.args) == 0:\n return self.name\n return \"{}[{}]\".format(self.name,\n \", \".join(arg_to_str(name, value)\n for name, value in self.args))", "def __str__(self):\n return '\\tHandle: %(handle)d (0x%(handle).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'handle': self.handle, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\n return '\\tHandle: %(handle)d (0x%(handle).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'handle': self.handle, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\n return '\\tCode: %(code)d (0x%(code).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'code': self.code, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def summary_parameters(self):\n text = re.sub(r'\\r?\\n', ' - ', self.opt.text[:200])\n return {'adding': text}" ]
[ "0.7582478", "0.6667753", "0.65951604", "0.6493384", "0.64455795", "0.64114326", "0.63715106", "0.62896985", "0.6242153", "0.62383384", "0.62208116", "0.62207097", "0.61746264", "0.6109571", "0.61072844", "0.6098375", "0.60816747", "0.6051815", "0.6050969", "0.60330296", "0.6029222", "0.6026988", "0.59995276", "0.5982199", "0.5974766", "0.59525067", "0.5951336", "0.5951336", "0.5951253", "0.59383744" ]
0.7280672
1
Base hash on description string, just like equality operator.
def __hash__(self): return hash(self.description)
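A minimal sketch of the contract this one-liner relies on, using a hypothetical class: objects that compare equal by description must also hash equally, so duplicates collapse in sets and dict keys.

class Described(object):
    def __init__(self, description):
        self.description = description
    def __eq__(self, other):
        return self.description == other.description
    def __hash__(self):
        # base the hash on the description string, matching __eq__
        return hash(self.description)

assert Described("flat field") == Described("flat field")
assert len({Described("flat field"), Described("flat field")}) == 1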
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash(self) -> str:\r\n ...", "def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))", "def __hash__(self):\n return hash(self.text)", "def hash(self) -> bytes:", "def hash(self, string):\n return self.__scaffydb.hash(string)", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def hash_string(self):\n return self._hash_string", "def __hash__(self):\n return hash(self.get_canonical_identifier())", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash((self.title, self.isbn))", "def __hash__(self):\n return hash(self.literals)", "def __hash__(self):\n\t\treturn hash(repr(self))", "def __hash__(self):\n hash_value = 0\n \n # required\n hash_value ^= self.required << 14\n \n # title\n hash_value ^= hash(self.title)\n \n # type\n hash_value ^= hash(self.type)\n \n # values\n values = self.values\n if (values is not None):\n hash_value ^= len(values)\n \n for value in values:\n hash_value ^= hash(value)\n \n return hash_value", "def __hash__(self):\n return hash((self.SYMBOL, self._.hash_parameters))", "def __hash__(self):\n return hash((self._start, self._end, self._name, self._value))", "def __hash__(self):\n return hash(self.hash)", "def hash_me(cls, p_str, p_len=64):\n v_hash = str()\n v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len\n if v_len == EC.SHA512:\n v_hash = hashlib.sha512()\n elif v_len == EC.SHA256:\n v_hash = hashlib.sha256()\n elif v_len == EC.SHA224:\n v_hash = hashlib.sha224()\n elif v_len == EC.SHA1:\n v_hash = hashlib.sha1()\n\n v_hash.update(p_str.encode(\"utf-8\"))\n return v_hash.hexdigest()", "def __hash__(self):\n return hash(self.name)", "def __hash__(self):\n return hash(self.name)", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"" ]
[ "0.72970223", "0.70238286", "0.6990711", "0.68921405", "0.6877547", "0.6868672", "0.6868672", "0.6868672", "0.6853107", "0.6853107", "0.6853107", "0.6853107", "0.6820892", "0.68152857", "0.67963797", "0.67854613", "0.67854613", "0.67854613", "0.6765069", "0.67579234", "0.6742125", "0.6736698", "0.670241", "0.6683789", "0.6656659", "0.6606796", "0.6572969", "0.6572969", "0.6533307", "0.6531844" ]
0.8038765
0
Convert 2D alignment parameters (alpha, sx, sy, mirror) into 3D alignment parameters (phi, theta, psi, s2x, s2y, mirror)
def params_2D_3D(alpha, sx, sy, mirror):
    phi = 0
    psi = 0
    theta = 0
    alphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)
    if mirror > 0:
        phi = (540.0 + phi) % 360.0
        theta = 180.0 - theta
        psi = (540.0 - psi + alphan) % 360.0
    else:
        psi = (psi + alphan) % 360.0
    return phi, theta, psi, s2x, s2y
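A hedged sketch of just the mirror-handling branch above, with the shift handling stripped out (the helper name and the angle values are hypothetical; the real function obtains alphan and the shifts from compose_transform2): a mirrored in-plane alignment is mapped to the theta > 90 hemisphere.

def euler_from_inplane(alphan, mirror):
    # reduced form: phi, theta, psi start at 0, as in params_2D_3D
    phi, theta, psi = 0.0, 0.0, 0.0
    if mirror > 0:
        phi = (540.0 + phi) % 360.0        # -> 180.0
        theta = 180.0 - theta              # -> 180.0, the mirrored hemisphere
        psi = (540.0 - psi + alphan) % 360.0
    else:
        psi = (psi + alphan) % 360.0
    return phi, theta, psi

print(euler_from_inplane(30.0, 1))   # (180.0, 180.0, 210.0)
print(euler_from_inplane(30.0, 0))   # (0.0, 0.0, 30.0)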
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = 
_get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def 
compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y", "def get_affine_matrix3d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angles: Tensor,\n sxy: Tensor | None = None,\n sxz: Tensor | None = None,\n syx: Tensor | None = None,\n syz: Tensor | None = None,\n szx: Tensor | None = None,\n szy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def vs3_func_3(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n ang_deg, d = vs_params # degrees, nm\n ang_rad = np.deg2rad(ang_deg) # retrieve radians\n d = d * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = r_jk - (np.dot(r_ij, r_jk) / np.dot(r_ij, r_ij)) * r_ij\n traj[ts.frame] = pos_i + d * np.cos(ang_rad) * (r_ij / mda.lib.mdamath.norm(r_ij)) + d * np.sin(ang_rad) * (\n comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm**(-1)\n c = c / 10 # retrieve amgstrom**(-1) for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij + b * r_ik - c * (\n r_ij / mda.lib.mdamath.norm(r_ij) * r_ik / mda.lib.mdamath.norm(r_ik))", "def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b = vs_params # weight, nm\n b = b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = (1 - a) * r_ij + a * r_jk\n traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def vs3_func_1(ns, traj, vs_def_beads_ids, vs_params):\n\n i, j, k = vs_def_beads_ids\n a, b = vs_params # nm, nm\n a, b = a * 10, b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = 
ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij / mda.lib.mdamath.norm(r_ij) / 2 + b * r_ik / mda.lib.mdamath.norm(r_ik) / 2", "def get_affine_matrix3d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: torch.Tensor,\n angles: torch.Tensor,\n sxy: Optional[torch.Tensor] = None,\n sxz: Optional[torch.Tensor] = None,\n syx: Optional[torch.Tensor] = None,\n syz: Optional[torch.Tensor] = None,\n szx: Optional[torch.Tensor] = None,\n szy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def angleAxis2rot3D(axis, theta):\n if len(axis) is not 3:\n raise ValueError('Number of axis element must be 3!')\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cosTheta = np.cos(theta)\n bracket = 1 - cosTheta\n aBracket = a * bracket\n bBracket = b * bracket\n cBracket = c * bracket\n sinTheta = np.sin(theta)\n aSinTheta = a * sinTheta\n bSinTheta = b * sinTheta\n cSinTheta = c * sinTheta\n rot3D = np.array([[a*aBracket+cosTheta, a*bBracket-cSinTheta, a*cBracket+bSinTheta],\n [b*aBracket+cSinTheta, b*bBracket+cosTheta, b*cBracket-aSinTheta],\n [c*aBracket-bSinTheta, c*bBracket+aSinTheta, c*cBracket+cosTheta]])\n return rot3D", "def eq_to_3d(ra, dec):\r\n x = np.cos(ra) * np.cos(dec)\r\n y = np.sin(ra) * np.cos(dec)\r\n z = np.sin(dec)\r\n return x, y, z", "def ancmig_adj_3(params, ns):\n #8 parameters \n nu1, nuA, nu2, nu3, m1_1, T1a, T1b, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1a, m=mig1)\n fs.integrate(nu_T1, T1b) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration 
matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n return fs", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def transform_params(cls, orion_params, space):\n ax_params = {}\n for dim in space.values():\n if dim.type == \"fidelity\":\n continue\n\n ax_params[dim.name] = orion_params[dim.name]\n\n return ax_params", "def to_se3(self, state: Vector) -> RigidTransform:\n return (state[:9],state[9:12])" ]
[ "0.75017375", "0.6478277", "0.647607", "0.63587636", "0.60074914", "0.5803451", "0.5758167", "0.5752519", "0.5700443", "0.5646637", "0.56198543", "0.5572641", "0.5455936", "0.54369533", "0.5418458", "0.5403827", "0.539565", "0.5376442", "0.53718555", "0.53706646", "0.53607404", "0.5297691", "0.5282178", "0.5258941", "0.525507", "0.5244696", "0.5231194", "0.5214326", "0.5208538", "0.52048063" ]
0.78717786
0
Convert 3D alignment parameters (phi, theta, psi, s2x, s2y; there is no mirror in 3D!) into 2D alignment parameters (alpha, sx, sy, mirror)
def params_3D_2D(phi, theta, psi, s2x, s2y):
    if theta > 90.0:
        mirror = 1
        alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0 - psi, 0, 0, 1.0)
    else:
        mirror = 0
        alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0 - psi, 0, 0, 1.0)
    return alpha, sx, sy, mirror
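This function is the inverse of params_2D_3D from the previous record. A hedged round-trip sketch for the rotation-only case (zero shifts, no mirror); compose_transform2 is replaced by a simplified rotation-only stand-in, and the function names here are hypothetical.

def params_2D_3D_rot_only(alpha):
    # rotation part of compose_transform2(0, 0, 0, 1, -alpha, 0, 0, 1)
    alphan = (0.0 - alpha) % 360.0
    return 0.0, 0.0, alphan % 360.0       # phi, theta, psi (mirror == 0 branch)

def params_3D_2D_rot_only(psi):
    return (360.0 - psi) % 360.0          # alpha (theta <= 90 branch)

phi, theta, psi = params_2D_3D_rot_only(25.0)
assert abs(params_3D_2D_rot_only(psi) - 25.0) < 1e-9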
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 
theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = 
Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm**(-1)\n c = c / 10 # retrieve amgstrom**(-1) for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij + b * r_ik - c * (\n r_ij / mda.lib.mdamath.norm(r_ij) * r_ik / mda.lib.mdamath.norm(r_ik))", "def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y", "def vs3_func_1(ns, traj, vs_def_beads_ids, vs_params):\n\n i, j, k = vs_def_beads_ids\n a, b = vs_params # nm, nm\n a, b = a * 10, b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij / mda.lib.mdamath.norm(r_ij) / 2 + b * r_ik / mda.lib.mdamath.norm(r_ik) / 2", "def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b = vs_params # weight, nm\n b = b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = (1 - a) * r_ij + a * r_jk\n traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. 
/ 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 
5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def getTranslationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in separate array - easier for optimization\n nprojs = len(TiltSeries_._ProjectionList._list)\n self._alignmentTransX = nprojs * [0.]\n self._alignmentTransY = nprojs * [0.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentTransX[kk] = proj.getAlignmentTransX()\n self._alignmentTransY[kk] = proj.getAlignmentTransY()\n return self._alignmentTransX, self._alignmentTransY", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n 
ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def get_affine_matrix3d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angles: Tensor,\n sxy: Tensor | None = None,\n sxz: Tensor | None = None,\n syx: Tensor | None = None,\n syz: Tensor | None = None,\n szx: Tensor | None = None,\n szy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def xyz_to_zyx(xyz1, xyz2, xyz3):\n\n # Converto gli angoli in ingresso in gradi\n xyz1_r = xyz1 / 180.0 * Kinematic.M_PI\n xyz2_r = xyz2 / 180.0 * Kinematic.M_PI\n xyz3_r = xyz3 / 180.0 * Kinematic.M_PI\n\n # Calcolo l'elemento 3:1 della prima matrice (s1s3 - c1c3s2)\n minus_s2_xyz = (math.sin(xyz1_r) * math.sin(xyz3_r)) - (math.cos(xyz1_r) * math.cos(xyz3_r) * math.sin(xyz2_r))\n\n # Calcolo l'elemento 2:1 della prima matrice (c1s3 + c3s1s2)\n c2s1_xyz = (math.cos(xyz1_r) * math.sin(xyz3_r)) + (math.cos(xyz3_r) * math.sin(xyz1_r) * math.sin(xyz2_r))\n\n # Calcolo l'elemento 2:3 della prima matrice (c3s1 + c1s2s3)\n c2s3_xyz = (math.cos(xyz3_r) * math.sin(xyz1_r)) + (math.cos(xyz1_r)) - (math.sin(xyz2_r) * math.sin(xyz3_r))\n\n # Ora trovo gli angoli\n zyx2_r = math.asin(-minus_s2_xyz)\n c2_xyz = math.cos(zyx2_r)\n zyx1_r = math.asin(c2s1_xyz / c2_xyz)\n zyx3_r = math.asin(c2s3_xyz / c2_xyz)\n zyx3 = zyx3_r / Kinematic.M_PI * 180.0\n zyx2 = zyx2_r / Kinematic.M_PI * 180.0\n zyx1 = zyx1_r / Kinematic.M_PI * 180.0\n\n return [zyx3, zyx2, zyx1, zyx3_r, zyx2_r, zyx1_r]", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def affine_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat3(0)\r\n M2 = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def vs3_func_3(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n ang_deg, d = vs_params # degrees, nm\n ang_rad = np.deg2rad(ang_deg) # retrieve radians\n d = d * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = r_jk - (np.dot(r_ij, r_jk) / np.dot(r_ij, r_ij)) * r_ij\n traj[ts.frame] = pos_i + d * np.cos(ang_rad) * (r_ij / mda.lib.mdamath.norm(r_ij)) + d * np.sin(ang_rad) * (\n comb_ijk / 
mda.lib.mdamath.norm(comb_ijk))" ]
[ "0.76915425", "0.6404001", "0.6382034", "0.6108902", "0.6060465", "0.6017704", "0.57541436", "0.5705645", "0.5625745", "0.5595656", "0.5591025", "0.55300176", "0.5473145", "0.53983897", "0.53954655", "0.5385849", "0.53646827", "0.53643596", "0.536235", "0.5350919", "0.5346317", "0.53293073", "0.53148276", "0.53096694", "0.5309128", "0.52827907", "0.5273503", "0.5267859", "0.52617663", "0.525534" ]
0.7371874
1
Commented by Zhengfan Yang on 05/01/07: I made some changes to the original amoeba so that it can now pass out values calculated by func other than the criterion. This is important in multilevel amoeba refinement because otherwise the upper-level refinement would lose the information from the lower-level refinement.
def amoeba_multi_level(var, scale, func, ftolerance=1.e-4, xtolerance=1.e-4, itmax=500, data=None):
    #print " ENTER AMOEBA MULTI LEVEL"
    nvar = len(var)        # number of variables in the minimization
    nsimplex = nvar + 1    # number of vertices in the simplex

    # first set up the simplex
    simplex = [0]*(nvar+1)   # set the initial simplex
    simplex[0] = var[:]
    for i in xrange(nvar):
        simplex[i+1] = var[:]
        simplex[i+1][i] += scale[i]

    fvalue = []
    for i in xrange(nsimplex):   # set the function values for the simplex
        result, passout = func(simplex[i], data=data)
        #print " amoeba setting ",i,simplex[i],result, passout
        fvalue.append([result, passout])

    # Ooze the simplex to the maximum
    iteration = 0
    while 1:
        # find the index of the best and worst vertices in the simplex
        ssworst = 0
        ssbest = 0
        for i in xrange(nsimplex):
            if fvalue[i][0] > fvalue[ssbest][0]:
                ssbest = i
            if fvalue[i][0] < fvalue[ssworst][0]:
                ssworst = i

        # get the average of the nsimplex-1 best vertices in the simplex
        pavg = [0.0]*nvar
        for i in xrange(nsimplex):
            if i != ssworst:
                for j in range(nvar):
                    pavg[j] += simplex[i][j]
        for j in xrange(nvar):
            pavg[j] = pavg[j]/nvar   # nvar is nsimplex-1

        simscale = 0.0
        for i in range(nvar):
            simscale += abs(pavg[i]-simplex[ssworst][i])/scale[i]
        simscale = simscale/nvar

        # find the range of the function values
        fscale = (abs(fvalue[ssbest][0])+abs(fvalue[ssworst][0]))/2.0
        if fscale != 0.0:
            frange = abs(fvalue[ssbest][0]-fvalue[ssworst][0])/fscale
        else:
            frange = 0.0   # all the fvalues are zero in this case

        # have we converged?
        if (((ftolerance <= 0.0 or frange < ftolerance) and      # converged to maximum
             (xtolerance <= 0.0 or simscale < xtolerance)) or    # simplex contracted enough
                (itmax and iteration >= itmax)):                 # ran out of iterations
            return simplex[ssbest], fvalue[ssbest][0], iteration, fvalue[ssbest][1]

        # reflect the worst vertex
        pnew = [0.0]*nvar
        for i in xrange(nvar):
            pnew[i] = 2.0*pavg[i] - simplex[ssworst][i]
        fnew = func(pnew, data=data)
        if fnew[0] <= fvalue[ssworst][0]:
            # the new vertex is worse than the worst so shrink the simplex.
            for i in xrange(nsimplex):
                if i != ssbest and i != ssworst:
                    for j in xrange(nvar):
                        simplex[i][j] = 0.5*simplex[ssbest][j] + 0.5*simplex[i][j]
                    fvalue[i] = func(simplex[i], data=data)
            for j in xrange(nvar):
                pnew[j] = 0.5*simplex[ssbest][j] + 0.5*simplex[ssworst][j]
            fnew = func(pnew, data=data)
        elif fnew[0] >= fvalue[ssbest][0]:
            # the new vertex is better than the best so expand the simplex.
            pnew2 = [0.0]*nvar
            for i in xrange(nvar):
                pnew2[i] = 3.0*pavg[i] - 2.0*simplex[ssworst][i]
            fnew2 = func(pnew2, data=data)
            if fnew2[0] > fnew[0]:
                # accept the new vertex in the simplex
                pnew = pnew2
                fnew = fnew2

        # replace the worst vertex with the new vertex
        for i in xrange(nvar):
            simplex[ssworst][i] = pnew[i]
        fvalue[ssworst] = fnew
        iteration += 1
        #print "Iteration:",iteration," ",ssbest," ",fvalue[ssbest]
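A hedged usage sketch for the maximizer above (the objective and its values are hypothetical). Note the function is Python 2 code (it uses xrange); under Python 3 you would first need xrange = range. The point of the multi-level variant is that func returns (criterion, extra), and the extra computed at the best vertex is returned alongside the optimum.

def toy_func(p, data=None):
    x, y = p
    criterion = -((x - 1.0) ** 2 + (y + 2.0) ** 2)   # maximum 0.0 at (1, -2)
    extra = {"residual": -criterion}                 # side information to pass back out
    return criterion, extra

best, fbest, niter, extra = amoeba_multi_level([0.0, 0.0], [0.5, 0.5], toy_func)
# best ~ [1.0, -2.0], fbest ~ 0.0, extra is the dict computed at the best vertex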
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fit_amoeba(self, kwargs, verbose):\n\n args_init = self._param_class.kwargs_to_args(kwargs)\n\n options = {\n \"adaptive\": True,\n \"fatol\": self._tol_simplex_func,\n \"maxiter\": self._simplex_n_iterations * len(args_init),\n }\n\n method = \"Nelder-Mead\"\n\n if verbose:\n print(\"starting amoeba... \")\n\n opt = minimize(\n self.fast_rayshooting.chi_square,\n x0=args_init,\n method=method,\n options=options,\n )\n\n kwargs = self._param_class.args_to_kwargs(opt[\"x\"])\n source_penalty = opt[\"fun\"]\n\n return kwargs, source_penalty", "def psi(a):", "def f1_part_i(x,m_ind):\n\n #f = max(2.0*rho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1] ,2.0*rho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1])\n tmp1 = 2.0*rho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1]\n tmp2 = 2.0*rho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1]\n \n # checking absolute value of rho-b_i = rho1-rho2-b_i\n #if (tmp1-tmp2 > cfg.a[m_ind,cfg.nfea-1]):\n # cfg.alpha[m_ind] = 1.0\n #if (tmp1-tmp2 == cfg.a[m_ind,cfg.nfea-1]):\n # cfg.alpha[m_ind] = 0.5\n #else:\n # cfg.alpha[m_ind] = 0.0\n \n # checking maximum used in rho1 \n if (tmp1 > tmp2):\n f = tmp1\n cfg.alpha1[m_ind] = 1 \n elif (tmp1 < tmp2):\n f = tmp2\n cfg.alpha1[m_ind] = 0 \n else:\n f = tmp2\n cfg.alpha1[m_ind] = 2 \n \n return f", "def amoeba(transform, parameters_tolerance=0.1, function_tolerance=0.0001, max_iterations=300, scales=None, initial_simplex_size=None):\n\n #\n # Iteration Observer\n #\n #def iterationUpdate():\n # print optimizer.GetInitialSimplexDelta()\n # print transform.GetParameters()\n \n optimizer = itk.AmoebaOptimizer.New()\n optimizer.MinimizeOn()\n # Medimax <-> Numerical Recipes in C\n # recalage/mtch_3d.c:get_facteur_precision\n # NORMAL : 1\n # PRECIS : 0.1\n # TRES_PRECIS : 0.01\n # PRECISION_MAX : 0.0001\n optimizer.SetParametersConvergenceTolerance(parameters_tolerance) # 1/10th pixel\n optimizer.SetFunctionConvergenceTolerance(function_tolerance) # 0.001 bits\n optimizer.SetMaximumNumberOfIterations(max_iterations)\n \n if initial_simplex_size is not None :\n optimizer.AutomaticInitialSimplexOff()\n delta = transform.GetNumberOfParameters()*(initial_simplex_size,) # the initial size of the simplex (initial_simplex_size units in each of the parameters)\n print delta\n optimizer.SetInitialSimplexDelta(delta)\n else :\n optimizer.AutomaticInitialSimplexOn()\n\n if scales is not None :\n optimizer.SetScales(scales)\n\n #iterationCommand = itk.PyCommand.New()\n #iterationCommand.SetCommandCallable( iterationUpdate )\n #optimizer.AddObserver( itk.IterationEvent(), iterationCommand.GetPointer() )\n \n return optimizer", "def apply(self):", "def solve(self):", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n 
self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def alsace(func, N, jpdf, tol=1e-22, sample_type='R', limit_cond=100,\n max_fcalls=1000, seed=123, ed_file=None, ed_fevals_file=None,\n verbose=True, pce_dict={}):\n\n if not pce_dict: # if pce_dict is empty --> cold-start\n idx_act = []\n idx_act.append([0]*N) # start with 0 multi-index\n idx_adm = []\n # set seed\n ot.RandomGenerator.SetSeed(seed)\n ed_size = 2*N # initial number of samples\n # initial experimental design and coresponding evaluations\n ed, ed_fevals = get_ed(func, jpdf, ed_size, sample_type=sample_type,\n knots=[], values=[], ed_file=ed_file,\n 
ed_fevals_file=ed_fevals_file)\n global_error_indicator = 1.0 # give arbitrary sufficiently large value\n\n # get the distribution type of each random variable\n dist_types = []\n for i in range(N):\n dist_type = jpdf.getMarginal(i).getName()\n dist_types.append(dist_type)\n\n # create orthogonal univariate bases\n poly_collection = ot.PolynomialFamilyCollection(N)\n for i in range(N):\n pdf = jpdf.getDistributionCollection()[i]\n algo = ot.AdaptiveStieltjesAlgorithm(pdf)\n poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)\n\n # create multivariate basis\n mv_basis = ot.OrthogonalProductPolynomialFactory(\n poly_collection,\n ot.LinearEnumerateFunction(N)\n )\n # get enumerate function (multi-index handling)\n enum_func = mv_basis.getEnumerateFunction()\n\n else: # get data from dictionary\n idx_act = pce_dict['idx_act']\n idx_adm = pce_dict['idx_adm']\n pce_coeff_act = pce_dict['pce_coeff_act']\n pce_coeff_adm = pce_dict['pce_coeff_adm']\n ed = pce_dict['ed']\n ed_fevals = pce_dict['ed_fevals']\n ed_size = len(ed_fevals)\n # compute local and global error indicators\n global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)\n enum_func = pce_dict['enum_func']\n mv_basis = pce_dict['mv_basis']\n\n #\n while ed_size < max_fcalls and global_error_indicator > tol:\n # the index added last to the activated set is the one to be refined\n last_act_idx = idx_act[-1][:]\n # get admissible neighbors of the lastly added index\n adm_neighbors = admissible_neighbors(last_act_idx, idx_act)\n # update admissible indices\n idx_adm = idx_adm + adm_neighbors\n # get polynomial basis for the LS problem\n idx_ls = idx_act + idx_adm\n idx_ls_single = transform_multi_index_set(idx_ls, enum_func)\n ls_basis = mv_basis.getSubBasis(idx_ls_single)\n ls_basis_size = len(ls_basis)\n\n # construct the design matrix D and compute its QR decomposition and its\n # condition number\n D = get_design_matrix(ls_basis, ed)\n Q, R = sp.qr(D, mode='economic')\n condD = np.linalg.cond(R)\n\n # get largest eigenvalue of A^-1\n A = np.matmul(D.T, D) / ed_size\n# lambda_max=power_iteration(A,1000)\n# lambda_min=power_iteration(A-lambda_max*np.eye(A.shape[0]),10000)+lambda_max\n#\n# print('--------- power iteration ----------')\n# print('lambda max= ', lambda_max)\n# print('lambda min= ', lambda_min)\n# print('lambda max inv= ', 1./lambda_min)\n# print('--------- numpy ----------')\n# print('lambda max= ', max(np.linalg.eig(A)[0]))\n# print('lambda min= ', min(np.linalg.eig(A)[0]))\n# print('lambda max inv lambda= ', 1./min(np.linalg.eig(A)[0]))\n# print('lambda max inv A= ', max(np.linalg.eig(np.linalg.inv(A))[0]))\n# print('')\n# print('')\n eigA = 1./min(np.linalg.eig(A)[0])\n # If condD becomes too large, enrich the ED until condD becomes acceptable\n # or until ed_size reaches max_fcalls\n while (eigA > limit_cond and ed_size < max_fcalls) or ed_size < ls_basis_size:\n # inform user\n if verbose:\n print('WARNING: condition(D) = ' , condD)\n print('WARNING: lambda_max(A^-1) = ' , eigA)\n print(\"\")\n # select new size for the ED\n if ls_basis_size > ed_size:\n ed_size = ls_basis_size + N\n elif ed_size + N > max_fcalls:\n ed_size = max_fcalls\n else:\n ed_size = ed_size + N\n # expand ED\n ed, ed_fevals = get_ed(func, jpdf, ed_size, sample_type=sample_type,\n knots=ed, values=ed_fevals, ed_file=ed_file,\n ed_fevals_file=ed_fevals_file)\n # construct the design matrix D and compute its QR decomposition and its\n # condition number\n D = get_design_matrix(ls_basis, ed)\n Q, R = sp.qr(D,mode='economic')\n 
condD = np.linalg.cond(R)\n A = np.matmul(D.T, D) / ed_size\n eigA = 1./min(np.linalg.eig(A)[0])\n\n # solve LS problem\n c = Q.T.dot(ed_fevals)\n pce_coeff_ls = sp.solve_triangular(R, c)\n\n # find the multi-index with the largest contribution, add it to idx_act\n # and delete it from idx_adm\n pce_coeff_act = pce_coeff_ls[:len(idx_act)].tolist()\n pce_coeff_adm = pce_coeff_ls[-len(idx_adm):].tolist()\n help_idx = np.argmax(np.abs(pce_coeff_adm))\n idx_add = idx_adm.pop(help_idx)\n pce_coeff_add = pce_coeff_adm.pop(help_idx)\n idx_act.append(idx_add)\n pce_coeff_act.append(pce_coeff_add)\n\n # store expansion data in dictionary\n pce_dict = {}\n pce_dict['idx_act'] = idx_act\n pce_dict['idx_adm'] = idx_adm\n pce_dict['pce_coeff_act'] = pce_coeff_act\n pce_dict['pce_coeff_adm'] = pce_coeff_adm\n pce_dict['ed'] = ed\n pce_dict['ed_fevals'] = ed_fevals\n pce_dict['enum_func'] = enum_func\n pce_dict['mv_basis'] = mv_basis\n return pce_dict", "def integ_exact(model,func_params):\n\n\n if (model=='genz_osc'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=cos(2.*pi*gcf[0]+0.5*sum(gcf[1:]))\n for i in range(1,dim+1):\n integ_ex*=(2.*sin(gcf[i]/2.)/gcf[i])\n elif (model=='genz_exp'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n at1=exp(-gcf[i]*gcf[0])\n at2=exp(gcf[i]*(1.-gcf[0]))\n integ_ex*=((at2-at1)/(gcf[i]))\n elif (model=='genz_cont'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n integ_ex*= ((2.-exp(gcf[i]*(-gcf[0]))-exp(gcf[i]*(gcf[0]-1.)))/gcf[i])\n elif (model=='genz_gaus'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n at1=erf(-gcf[i]*gcf[0])\n at2=erf(gcf[i]*(1.-gcf[0]))\n integ_ex*=((at2-at1)*sqrt(pi)/(2.*gcf[i]))\n elif (model=='genz_cpeak'):\n gcf=func_params\n dim=gcf.shape[0]-1\n numer=0.0\n count=1\n denom=1.\n for i in range(1,dim+1):\n comb=list(itertools.combinations(range(1,dim+1),i))\n for j in range(len(comb)):\n assert(i==len(comb[j]))\n #print i,j,pow(-1,i)\n numer+=(pow(-1,i)/(1.+sum(gcf[list(comb[j])])))\n count+=1\n denom*=(i*gcf[i])\n #print count, numer\n integ_ex=(1.+numer)/denom\n elif (model=='genz_ppeak'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n at1=np.arctan(-gcf[i]*gcf[0])\n at2=np.arctan(gcf[i]*(1.-gcf[0]))\n integ_ex*=(gcf[i]*(at2-at1))\n\n return integ_ex", "def mezclar_bolsa(self):", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = zeros(n_grid)\n 
v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. \n Next period's value is computed by interpolation.\n \n Input : k_next (next period's capital)\n \n Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n \"\"\" \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec\n\n#----- Value function iteration\n start = time.time() # start time\n while converged==0 :\n index = 0\n for k_current in k_grid :\n k_next = fminbound(obj,k_grid[0],k_grid[-1])\n v_func[index] = (-1) * obj(k_next)\n k_next_vec[index] = k_next\n index = index + 1\n dist = abs(max(v_func - v_update))\n if dist<tol :\n converged = 1\n v_k, g_k = v_func, k_next_vec\n v_update = v_func\n print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n iteration = iteration + 1\n v_func = empty(n_grid) \n k_next_vec = empty(n_grid)\n \n end = time.time() # end time\n run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n \n return v_k, g_k, run_time, iteration", "def full_solver(output_folder, prior_filename, data_filename, resume=True, test_plot=False):\n\n\n def log_prior(cube, ndim, nparams):\n cube[0] = cube[0]*(L_lim[1] - L_lim[0]) + L_lim[0]\n cube[1] = cube[1]*(d_lim[1] - d_lim[0]) + d_lim[0]\n cube[2] = cube[2]*(F_lim[1] - F_lim[0]) + F_lim[0]\n cube[3] = cube[3]*(A_lim[1] - A_lim[0]) + A_lim[0]\n cube[4] = cube[4]*(Arel_lim[1] - Arel_lim[0]) + Arel_lim[0]\n cube[5] = cube[5]*(Ti_lim[1] - Ti_lim[0]) + Ti_lim[0]\n #cube[6] = cube[6]*(off_lim[1] - off_lim[0]) + off_lim[0]\n cube[6] = cube[6]*(Brel_lim[1] - Brel_lim[0]) + Brel_lim[0]\n #cube[7] = cube[7]*(Brel_lim[1] - Brel_lim[0]) + Brel_lim[0]\n\n\n def log_likelihood(cube, ndim, nparams):\n #vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]], [Ti_Th, cube[5]], [0.0, 0.0], nlambda=2000)\n #vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3], cube[3]*cube[7]], [Ti_Th, cube[5], Ti_Th],\n # [0.0, 0.0, 0.0], nlambda=2000)\n vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3], cube[3]*cube[6]], [Ti_Th, cube[5], Ti_Th],\n [0.0, 0.0, 0.0], nlambda=2000)\n #vals = offset_forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]], [Ti_Th, cube[5]],\n # [0.0, 0.0], sm_ang=False, nlambda=2000, coeff=0.5)\n #vals += cube[6]\n chisq = np.sum((vals - sig)**2 / error**2)\n return -chisq / 2.0\n\n data = io.h5_2_dict(data_filename)\n\n ix = data['fit_ix']['0']#[0:-1:2]\n r = data['r'][ix]\n sig = data['sig'][ix]\n error = data['sig_sd'][ix]\n\n Ti_Th = 0.025*1000.0 / 300.0\n\n px_size = 0.004# * 3 \n L_lim = [147.0, 153.0]\n L_lim = [x / px_size for x in L_lim]\n\n d_lim = [0.7, 1.0]\n\n F_lim = [17.0, 26.0]\n\n Amax = np.max(sig)\n A_lim = [0.75*Amax, 2.0*Amax]\n\n Arel_lim = [0.005, 0.6]\n Brel_lim = [0.001, 0.2]\n\n Ti_lim = [0.025, 1.0]\n #min_val = 
np.abs(np.min(data['sig'][ix]))\n min_val = 50.0\n off_lim = [0.0, min_val]\n #n_params = 6\n n_params = 7\n #n_params = 8\n folder = abspath(output_folder)\n\n if test_plot:\n pass\n # npts = 100\n # test_sig = np.zeros((npts, len(r)))\n # for i in xrange(npts):\n # cube = [random.random() for _ in xrange(n_params)] \n # log_prior(cube, None, None)\n # test_sig[i, :] = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]],\n # [Ti_Th, cube[5]], [0.0, 0.0], nlambda=2000)\n\n # fig, ax = plt.subplots()\n # for i in xrange(npts):\n # ax.plot(r, test_sig[i, :], 'C0')\n # ax.errorbar(r, sig, yerr=error, fmt='', ecolor='C2', color='C1')\n # plt.show()\n\n else:\n pymultinest.run(log_likelihood, log_prior, n_params, importance_nested_sampling=False,\n resume=resume, verbose=True, sampling_efficiency='model', n_live_points=1000,\n outputfiles_basename=join(folder, 'full_'))", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def _analyze_opt_criterias_(criterias, sign_threshold, max_num_cofactors, file_prefix, with_qq_plots,\n lm, step_info_list, quantiles_dict, plot_bonferroni=True, cand_genes=None,\n plot_xaxis=True, log_qq_max_val=5, eig_L=None, type='emmax', highlight_loci=None,\n write_pvals=False, snp_priors=None, ppa_threshold=0.5, emma_num=None,\n save_pvals=False , **kwargs):\n ret_dict = {}\n opt_indices = {}\n opt_file_dict = {}\n for c in criterias:\n print 'GWAs for optimal %s criteria:' % c\n if c == 'bonf':\n opt_list = sp.arange(max_num_cofactors + 1)\n for i, pval in enumerate(criterias['bonf']):\n if pval > sign_threshold:\n opt_list[i] = -1\n i_opt = opt_list.argmax()\n elif c == 'mbonf':\n fw_opt_list = sp.arange(max_num_cofactors + 1)\n for i in range(max_num_cofactors + 1):\n pval = criterias[c][i]\n if pval > sign_threshold:\n fw_opt_list[i] = -1\n fw_i_opt = fw_opt_list.argmax()\n fw_max = fw_opt_list[fw_i_opt]\n\n if max_num_cofactors > 1:\n shift = max_num_cofactors + 1\n bw_opt_list = sp.arange(max_num_cofactors - 1, 0, -1)\n for i in range(len(bw_opt_list)):\n pval = criterias[c][i + shift]\n if pval > sign_threshold:\n bw_opt_list[i] = -1\n bw_i_opt = bw_opt_list.argmax()\n bw_max = bw_opt_list[bw_i_opt]\n bw_i_opt = bw_opt_list.argmax() + shift\n if bw_max == fw_max:\n i_opt = bw_i_opt if criterias[c][fw_i_opt] > criterias[c][bw_i_opt] else fw_i_opt\n else:\n i_opt = bw_i_opt if bw_max > fw_max else fw_i_opt\n else:\n i_opt = fw_i_opt\n elif c == 'min_cof_ppa':\n fw_opt_list = sp.arange(max_num_cofactors + 1)\n for i in range(max_num_cofactors + 1):\n ppa = 
criterias[c][i]\n if ppa < ppa_threshold:\n fw_opt_list[i] = -1\n fw_i_opt = fw_opt_list.argmax()\n fw_max = fw_opt_list[fw_i_opt]\n\n if max_num_cofactors > 1:\n shift = max_num_cofactors + 1\n bw_opt_list = sp.arange(max_num_cofactors - 1, 0, -1)\n for i in range(len(bw_opt_list)):\n ppa = criterias[c][i + shift]\n if ppa < ppa_threshold:\n bw_opt_list[i] = -1\n bw_i_opt = bw_opt_list.argmax()\n bw_max = bw_opt_list[bw_i_opt]\n bw_i_opt = bw_opt_list.argmax() + shift\n if bw_max == fw_max:\n i_opt = bw_i_opt if criterias[c][fw_i_opt] > criterias[c][bw_i_opt] else fw_i_opt\n else:\n i_opt = bw_i_opt if bw_max > fw_max else fw_i_opt\n else:\n i_opt = fw_i_opt\n\n else:\n cur_min_val = criterias[c][0]\n min_indices = [0]\n for i in range(1, len(criterias[c])):\n v = criterias[c][i]\n if v < cur_min_val:\n cur_min_val = v\n min_indices = [i]\n if v == cur_min_val:\n min_indices.append(i)\n i_opt = min(min_indices)\n # i_opt = sp.argmin(criterias[c])\n print \" %d'th step was optimal.\" % i_opt\n ret_dict[c] = i_opt\n if i_opt <= max_num_cofactors:\n # Copy the pngs...\n if file_prefix:\n png_file_name = '%s_step%d.png' % (file_prefix, i_opt)\n opt_png_file_name = '%s_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if snp_priors != None:\n png_file_name = '%s_ppa_step%d.png' % (file_prefix, i_opt)\n opt_png_file_name = '%s_ppa_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if with_qq_plots:\n qq_png_file_name = '%s_step%d_qqplot.png' % (file_prefix, i_opt)\n opt_qq_png_file_name = '%s_step%d_opt_%s_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', qq_png_file_name, opt_qq_png_file_name)\n log_qq_png_file_name = '%s_step%d_log_qqplot.png' % (file_prefix, i_opt)\n opt_log_qq_png_file_name = '%s_step%d_opt_%s_log_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', log_qq_png_file_name, opt_log_qq_png_file_name)\n elif i_opt in opt_file_dict:\n if file_prefix:\n png_file_name = opt_file_dict[i_opt]['manhattan']\n opt_png_file_name = '%s_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if snp_priors != None:\n png_file_name = opt_file_dict[i_opt]['ppa_manhattan']\n opt_png_file_name = '%s_ppa_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n\n if with_qq_plots:\n qq_png_file_name = opt_file_dict[i_opt]['qq']\n opt_qq_png_file_name = '%s_step%d_opt_%s_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', qq_png_file_name, opt_qq_png_file_name)\n log_qq_png_file_name = opt_file_dict[i_opt]['log_qq']\n opt_log_qq_png_file_name = '%s_step%d_opt_%s_log_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', log_qq_png_file_name, opt_log_qq_png_file_name)\n\n elif not i_opt in opt_indices:\n # Perfom GWAS witht the optimal cofactors\n cofactor_snps = step_info_list[i_opt]['cofactor_snps']\n cofactors = step_info_list[i_opt]['cofactors']\n print cofactors\n lm.set_factors(cofactor_snps)\n if type == 'emmax':\n eig_R = lm._get_eigen_R_(X=lm.X)\n reml_res = lm.get_REML(eig_L=eig_L, eig_R=eig_R)\n H_sqrt_inv = reml_res['H_sqrt_inv']\n l_res = lm._emmax_f_test_(kwargs['snps'], H_sqrt_inv, 
snp_priors=snp_priors,\n emma_num=emma_num)\n min_pval_i = l_res['ps'].argmin()\n mahalnobis_rss = l_res['rss'][min_pval_i]\n print 'Min Mahalanobis RSS:', mahalnobis_rss\n elif type == 'lm':\n l_res = lm.fast_f_test(kwargs['snps'])\n min_pval_i = l_res['ps'].argmin()\n\n min_pval = l_res['ps'][min_pval_i]\n\n min_pval_chr_pos = (kwargs['chromosomes'][min_pval_i], kwargs['positions'][min_pval_i])\n print 'Min p-value:', min_pval\n l_pvals = l_res['ps'].tolist()\n l_perc_var_expl = l_res['var_perc'].tolist()\n opt_indices[i_opt] = {'min_pval':min_pval, 'min_pval_chr_pos':min_pval_chr_pos,\n 'kolmogorov_smirnov':agr.calc_ks_stats(l_pvals),\n 'pval_median':agr.calc_median(l_pvals)}\n if file_prefix:\n opt_file_prefix = '%s_opt_%s' % (file_prefix, c)\n if snp_priors:\n ppa_cofactors = step_info_list[i_opt]['ppa_cofactors']\n ppas = l_res['ppas'].tolist()\n else:\n ppas = None\n ppa_cofactors = None\n opt_file_dict[i_opt], res = _plot_manhattan_and_qq_(opt_file_prefix, i_opt, l_pvals, quantiles_dict,\n plot_bonferroni=True, highlight_markers=cofactors,\n cand_genes=cand_genes, plot_xaxis=plot_xaxis,\n log_qq_max_val=log_qq_max_val, with_qq_plots=with_qq_plots,\n simple_qq=True, highlight_loci=highlight_loci,\n write_pvals=write_pvals, highlight_ppa_markers=ppa_cofactors,\n ppas=ppas, perc_var_expl=l_perc_var_expl, save_pvals=save_pvals,\n **kwargs)\n if save_pvals:\n opt_indices['res'] = opt_file_dict[i_opt]['res']\n\n if type == 'emmax':\n opt_indices[i_opt]['mahalanobis_rss'] = mahalnobis_rss\n opt_indices[i_opt]['res'] = res\n return ret_dict, opt_indices", "def optim_func(params, model):\n if model.model == 'ARD':\n model.alpha, model.beta = params\n lik = model.pruning_algorithm()\n\n else:\n model.alpha = params[0]\n lik = model.pruning_algorithm()\n \n return -lik", "def falcon():", "def alphabeta_search(state, d=1, cutoff_test=None, eval_fn=None, start_time=None, turn_number=None):\n global count\n global testing\n global BigInitialValue\n global MoveTimes\n\n player = state.to_move\n count = 0\n\n def max_value(state, alpha, beta, depth):\n global count, testing\n if testing:\n print(\" \"* depth, \"Max alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"* depth, \"Max cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = -BigInitialValue\n succ = state.game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"maxDepth: \", depth, \"Total:\", count, \"Successors: \", len(succ))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move it is next.\n # A player can move repeatedly if opponent is completely blocked\n if state.to_move == s.to_move:\n v = max(v, max_value(s, alpha, beta, depth+1))\n else:\n v = max(v, min_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"* depth, \"max best value:\", v)\n if v >= beta:\n return v\n alpha = max(alpha, v)\n return v\n\n def min_value(state, alpha, beta, depth):\n global count\n if testing:\n print(\" \"*depth, \"Min alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"*depth, \"Min cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = BigInitialValue\n succ = state.game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"minDepth: \", depth, \"Total:\", count, \"Successors: \", len(succ))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move 
it is next.\n # A player can move repeatedly if opponent is completely blocked\n if state.to_move == s.to_move:\n v = min(v, min_value(s, alpha, beta, depth+1))\n else:\n v = min(v, max_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"*depth, \"min best value:\", v)\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n\n def right_value(s, alpha, beta, depth):\n if s.to_move.id == state.to_move.id:\n return max_value(s, -BigInitialValue, BigInitialValue, 0)\n else:\n return min_value(s, -BigInitialValue, BigInitialValue, 0)\n\n def argmin(seq, fn):\n \"\"\"Return an element with lowest fn(seq[i]) score; tie goes to first one.\n >>> argmin(['one', 'to', 'three'], len)\n 'to'\n \"\"\"\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best\n\n def argmax(seq, fn):\n \"\"\"Return an element with highest fn(seq[i]) score; tie goes to first one.\n >>> argmax(['one', 'to', 'three'], len)\n 'three'\n \"\"\"\n return argmin(seq, lambda x: -fn(x))\n\n # Body of alphabeta_search starts here:\n cutoff_test = (cutoff_test or\n (lambda state,depth: depth>d or state.game.terminal_test(state)))\n eval_fn = eval_fn or (lambda state: state.game.utility(state, turn_number))\n action, state = argmax(state.game.successors(state),\n lambda a_s: right_value(a_s[1], -BigInitialValue, BigInitialValue, 0))\n\n # calculate move time, round to 2 decimal places, store for analysis\n MoveTimes.append(round(time.time() - start_time, 2))\n return action", "def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]", "def abv(og, fg):\n return abw(og, fg) * fg / 0.794", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def MVAE_objective(ce_weight, modal_loss_funcs, recon_weights, input_to_float=True, annealing=1.0, criterion=torch.nn.CrossEntropyLoss()):\n recon_loss_func = elbo_loss(modal_loss_funcs, recon_weights, annealing)\n\n def allnonebuti(i, item):\n ret = [None for w in modal_loss_funcs]\n ret[i] = item\n return ret\n\n def actualfunc(pred, truth, args):\n training = args['training']\n reps = args['reps']\n fusedmu, fusedlogvar = args['fused']\n decoders = args['decoders']\n inps = args['inputs']\n reconsjoint = []\n\n if input_to_float:\n inputs = [i.float().cuda() for i in inps]\n else:\n inputs = [i.cuda() for i in inps]\n for i in range(len(inps)):\n reconsjoint.append(decoders[i](\n reparameterize(fusedmu, fusedlogvar, training)))\n total_loss = recon_loss_func(reconsjoint, inputs, fusedmu, fusedlogvar)\n for i in range(len(inps)):\n mu, logvar = reps[i]\n recon = decoders[i](reparameterize(mu, logvar, training))\n total_loss += recon_loss_func(allnonebuti(i, recon),\n allnonebuti(i, inputs[i]), mu, logvar)\n total_loss += ce_weight * criterioning(pred, truth, criterion)\n return total_loss\n return actualfunc", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT 
relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def _UpdateCriteria(self):\n grad = self.traj.grad[-1]\n disp = self.traj.coords[-1] - self.traj.coords[-2]\n self.delta_e = self.traj.energy[-1] - self.traj.energy[-2]\n self.grad_max = numpy.amax(grad)\n self.disp_max = numpy.amax(disp)\n self.grad_rms = math.sqrt(numpy.mean(grad**2))\n self.disp_rms = math.sqrt(numpy.mean(disp**2))", "def solve(self):\n ...", "def test_extension(roi,name,nb,distmax,gradient=True,update=True,bandfits=True):\n \n #save previous position of the source.\n roi.save('roi_temp.dat')\n source=source=roi.get_source(which=name)\n Parameters=source.spatial_model.get_parameters()\n ra_puls=Parameters[0]\n dec_puls=Parameters[1]\n \n ll,TS=localize_func(roi,name,nb,gradient=True,update=True,bandfits=bandfits)\n\n source=source=roi.get_source(which=name)\n Parameters=source.spatial_model.get_parameters()\n ra=Parameters[0]\n dec=Parameters[1]\n sigma=Parameters[2]\n \n source=source=roi.get_source(which=name)\n \n source=source=roi.get_source(which=name)\n if (dist(ra_puls,dec_puls,ra,dec)-sigma)<distmax:\n print \"Source consistent with the position of the pulsar : distance =%.2f\"%dist(ra,dec,source.skydir.ra(),source.skydir.dec())\n else :\n print \"Source unconsistent with the position of the pulsar : distance =%.2f\"%dist(ra,dec,source.skydir.ra(),source.skydir.dec())\n roi=load(\"roi_temp.dat\")\n \n os.system(\"rm -rf roi_temp.dat\")\n \n return roi", "def parametersweep(basedir,configfile,acfdir='ACF',invtype='tik'):\n\n alpha_sweep=sp.logspace(-3.5,sp.log10(7),25)\n costdir = os.path.join(basedir,'Cost')\n ionoinfname=os.path.join(basedir,acfdir,'00lags.h5')\n ionoin=IonoContainer.readh5(ionoinfname)\n \n dirio = ('Spectrums','Mat','ACFMat')\n inputdir = os.path.join(basedir,dirio[0])\n \n dirlist = glob.glob(os.path.join(inputdir,'*.h5'))\n 
(listorder,timevector,filenumbering,timebeg,time_s) = IonoContainer.gettimes(dirlist)\n Ionolist = [dirlist[ikey] for ikey in listorder]\n \n RSTO = RadarSpaceTimeOperator(Ionolist,configfile,timevector,mattype='Sim')\n \n npts=RSTO.simparams['numpoints']\n \n ionospec=makeionocombined(dirlist)\n if npts==ionospec.Param_List.shape[-1]:\n tau,acfin=spect2acf(ionospec.Param_Names,ionospec.Param_List)\n nloc,ntimes=acfin.shape[:2]\n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n acfin_amb=sp.zeros((nloc,ntimes,np),dtype=acfin.dtype)\n # get the original acf\n \n \n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n \n for iloc,locarr in enumerate(acfin):\n for itime,acfarr in enumerate(locarr):\n acfin_amb[iloc,itime]=sp.dot(ambmat,acfarr)\n acfin_amb=acfin_amb[:,0]\n else:\n acfin_amb=ionospec.Param_List[:,0]\n \n if not os.path.isdir(costdir):\n os.mkdir(costdir)\n # pickle file stuff \n pname=os.path.join(costdir,'cost{0}-{1}.pickle'.format(acfdir,invtype))\n\n alpha_list=[]\n errorlist=[]\n errorlaglist=[]\n datadiflist=[]\n constlist=[]\n if 'perryplane' in basedir.lower() or 'SimpData':\n rbounds=[-500,500]\n else:\n rbounds=[0,500]\n\n alpha_list_new=alpha_sweep.tolist()\n for i in alpha_list:\n if i in alpha_list_new:\n alpha_list_new.remove(i)\n \n for i in alpha_list_new:\n ionoout,datadif,constdif=invertRSTO(RSTO,ionoin,alpha_list=i,invtype=invtype,rbounds=rbounds,Nlin=1)\n \n datadiflist.append(datadif)\n constlist.append(constdif)\n acfout=ionoout.Param_List[:,0]\n alpha_list.append(i)\n outdata=sp.power(sp.absolute(acfout-acfin_amb),2)\n aveerror=sp.sqrt(sp.nanmean(outdata,axis=0))\n errorlaglist.append(aveerror)\n errorlist.append(sp.nansum(aveerror))\n \n pickleFile = open(pname, 'wb')\n pickle.dump([alpha_list,errorlist,datadiflist,constlist,errorlaglist],pickleFile)\n pickleFile.close()\n mkalphalist(pname)\n alphaarr=sp.array(alpha_list)\n errorarr=sp.array(errorlist)\n errorlagarr=sp.array(errorlaglist)\n datadif=sp.array(datadiflist)\n constdif=sp.array(constlist)\n fig,axlist,axmain=plotalphaerror(alphaarr,errorarr,errorlagarr)\n fig.savefig(os.path.join(costdir,'cost{0}-{1}.png'.format(acfdir,invtype)))\n \n fig,axlist=plotLcurve(alphaarr,datadif,constdif)\n fig.savefig(os.path.join(costdir,'lcurve{0}-{1}.png'.format(acfdir,invtype)))", "def solver_auto_param(u_init, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, eta_step = 0.5, eta_step_tumor = 0.99, ftol = 1e-3, max_iter = 300, verbose = 0, nnls_max_iter=30):\n auto_param_obj_history = []\n auto_param_relaxed_obj_history = []\n \n eta_0 = (1/(2*np.max(B)))*0.5 #Initialize eta_0\n eta = np.array([eta_0/len(H)]*len(H))*0.9\n eta_lin = np.ones(L_lhs.shape[0])*0.01\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u_init, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 300, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Feasibility')\n count = 0\n num_violated = -1\n while (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n count += 1\n num_violated_prev = np.copy(num_violated)\n num_violated_oar = len(H) - cnstr['Relaxed'].sum()\n 
num_violated_lin = L_lhs.shape[0] - np.sum(cnstr_linear)#(1 - int(cnstr_linear))\n num_violated = len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))#(1 - int(cnstr_linear))\n \n print('Iter ', count, '# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n print(' Linear constraints on u violation:', L_lhs.shape[0] - np.sum(cnstr_linear))\n eta[cnstr['Relaxed'] == False] *= eta_step\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n # eta_lin *= eta_step\n \n if num_violated == num_violated_prev:\n print('Increase enforcement')\n if num_violated_lin > 0:\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n #eta_lin *= eta_step\n if num_violated_oar > 0:\n eta[cnstr['Relaxed'] == False] *= eta_step\n # eta_0 *= eta_step*2\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Optimality')\n count = 0\n while not (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n # (cnstr['Relaxed'].sum()-len(H)): #If nothing is violated -- enforce optimality!\n count += 1\n print('Opt Iter', count)\n obj_prev = obj_u_opt_N_fixed(u, T, alpha, B)\n u_prev = np.copy(u)\n eta_0 *= eta_step_tumor\n print('Current eta_0:', eta_0)\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter//2, verbose = verbose, nnls_max_iter=nnls_max_iter)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n \n obj_new = obj_u_opt_N_fixed(u, T, alpha, B)\n if (abs(obj_new - obj_prev)/abs(obj_prev) <= 1e-4) or (obj_new > obj_prev): #two consequent iters, two times bc on iter 2 it stops anyway\n print('No improvement, increase enforcement')\n eta_step_tumor *= 0.1\n eta_0 *= eta_step_tumor\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # break\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))#(1 - int(cnstr_linear)))\n \n print('Finding the correct solution:')\n u = u_prev\n eta_0 = eta_0/eta_step_tumor\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n # print('# of violated constr:', cnstr['Relaxed'].sum()-len(H))\n print(\"OBJJJJJ:\", obj_u_opt_N_fixed(u, T, alpha, B))\n return u, w_0, w, w_lin, eta_0, eta, eta_lin, auto_param_obj_history, auto_param_relaxed_obj_history" ]
[ "0.5649461", "0.56487626", "0.5537902", "0.55107564", "0.5501089", "0.54266936", "0.5403812", "0.53776634", "0.5360801", "0.5356619", "0.5301135", "0.52775687", "0.5269604", "0.5262089", "0.52313644", "0.5223302", "0.5188832", "0.5179477", "0.516795", "0.51599634", "0.5152701", "0.51464236", "0.5137843", "0.5114498", "0.51131386", "0.5110136", "0.510727", "0.5101229", "0.5094462", "0.5088486" ]
0.599668
0
Fit the histogram of the input image under mask with the reference image.
def ce_fit(inp_image, ref_image, mask_image):
    hist_res = Util.histc(ref_image, inp_image, mask_image)
    args = hist_res["args"]
    scale = hist_res["scale"]
    data = [hist_res['data'], inp_image, hist_res["ref_freq_bin"], mask_image, int(hist_res['size_img']), hist_res['hist_len']]
    res = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data)
    resu = ["Final Parameter [A,B]:", res[0], "Final Chi-square :", -1*res[1], "Number of Iteration :", res[2]]
    corrected_image = inp_image*res[0][0] + res[0][1]
    result = [resu,"Corrected Image :",corrected_image]
    del data[:], args[:], scale[:]
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hist2d(x,y,nbins = 50 ,maskval = 0,saveloc = '',labels=[],slope = 1,sloperr = 0):\n\t# Remove NANs and masked values\n\tgood = where((isnan(x) == False) & (isnan(y) == False) & (x != maskval) & (y != maskval))\n\tx = x[good]\n\ty = y[good]\n\n\t# Create histogram\n\tH,xedges,yedges = histogram2d(x,y,bins=nbins)\n\t# Reorient appropriately\n\tH = rot90(H)\n\tH = flipud(H)\n\t# Mask zero value bins\n\tHmasked = ma.masked_where(H==0,H)\n\t# Find average values in y:\n\tyavgs = []\n\tystds = []\n\txposs = []\n\tfor j in range(len(xedges)-1):\n\t\ttoavg = where((x > xedges[j]) & (x < xedges[j+1]))\n\t\txpos = np.mean(x[toavg])\n\t\tyavg = np.median(y[toavg])\n\t\tystd = np.std(y[toavg])/len(y[toavg])\n\t\txposs.append(xpos)\n\t\tyavgs.append(yavg)\n\t\tystds.append(ystd)\n\t# Begin creating figure\n\tplt.figure(figsize=(12,10))\n\t# Make histogram pixels with logscale\n\tplt.pcolormesh(xedges,yedges,Hmasked,\n\t norm = LogNorm(vmin = Hmasked.min(),\n\t vmax = Hmasked.max()),\n\t\t \t cmap = plt.get_cmap('Spectral_r'))\n\t# Create fit line x-array\n\tuplim = nmax(x)+5\n\tdolim = nmin(x)-5\n\tx_range = arange(dolim,uplim)\n\t# Plot fit line\n\tplt.plot(x_range,slope*x_range,color = 'royalblue',linewidth = 3,label = 'Slope = {0}, Uncertainty = {1}'.format(slope,sloperr))\n\t# Plot average points\n\tplt.errorbar(xposs,yavgs,yerr = ystds,fmt = 'D',color='k',markersize = 5)\n\t# Set plot limits\n\tplt.xlim(dolim+5,uplim-5)\n\tplt.ylim(nmin(y),nmax(y))\n\t# Add colourbar\n\tcbar = plt.colorbar()\n\t# Add labels\n\tif labels != []:\n\t title,xlabel,ylabel,zlabel = labels\n\t plt.xlabel(xlabel)\n\t plt.ylabel(ylabel)\n\t plt.title(title)\n\t cbar.ax.set_ylabel(zlabel)\n\t plt.legend(loc = 'best',fontsize = 15)\n\t# Save plot\n\tif saveloc != '':\n\t\tplt.savefig(saveloc)\n\tplt.close()\n\t# Return histogram\n\treturn xedges,yedges,Hmasked", "def overlay_prob(image, mask, cutoff=0.5):\n if len(image.shape) == 3:\n image = image[: ,: ,0]\n if len(mask.shape) == 3:\n mask = mask[: ,: ,0]\n if np.amax(image) > 100:\n image = image /255\n\n mask = mask>=cutoff\n mask = mask.astype(int)\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def fit_reference(self, img):\n if self.refzone:\n meanfit, fit = self._get_reference_fit(img)\n img = self._overlay_ref_fit(img, meanfit, fit)\n self._set_offset(*meanfit)\n\n return img", "def describe(self, image, mask=None):\n histogram = cv2.calcHist([image], [0, 1, 2], mask, self.bins, [0, 256, 0, 256, 0, 256])\n cv2.normalize(histogram, histogram)\n\n return histogram.flatten()", "def _hist_match_image(im, targ, mask, inplace=True):\n # Copy?\n if not inplace:\n im = im.copy()\n\n # Add a small amount of random noise to break ties for sorting in next\n # step.\n im += 0.1 * np.random.rand(*im.shape)\n\n # Sort image pixels (we actually only need indices of sort)\n if mask is None:\n idcs = np.argsort(im.flat)\n else:\n idcs = np.argsort(im[mask].flat)\n\n # Replace image histogram with target histogram, using idcs to place\n # pixels at correct positions\n svim = np.empty(len(idcs))\n svim[idcs] = targ\n if mask is None:\n im[:] = svim.reshape(im.shape)\n else:\n im[mask] = svim\n\n # Return?\n if not inplace:\n return im", "def calculateHistogram(self):\n \n # Define color map\n colors = [ (255,0,0),(0,255,0),(0,0,255) 
]\n # Define empty image to plot histogram in\n plot_to_fill = np.zeros((280,400,3))\n # Define bins of the histogram\n bins = np.arange(256).reshape(256,1)\n \n # Boucle sur les canaux\n for channel, color in enumerate(colors):\n # Calcul de l'histogramme\n hist_item = cv2.calcHist(self.frame,[channel],None,[256],[0,256])\n # Normalisation\n cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)\n # Conversion\n hist = np.int32(np.around(hist_item))\n pts = np.int32(np.column_stack((bins, hist)))\n cv2.polylines(plot_to_fill, [pts], False, color)\n # Mettre dans le bon sens\n histplot = np.flipud(plot_to_fill)\n histplot = np.uint8(histplot)\n \n # Conversion en objet QPixelMap\n self.histplot_qpix = self.convertToQPixelmap(histplot)", "def predict_mask(logit, EMPTY_THRESHOLD, MASK_THRESHOLD):\n #pred mask 0-1 pixel-wise\n #n = logit.shape[0]\n IMG_SIZE = logit.shape[-1] #256\n #EMPTY_THRESHOLD = 100.0*(IMG_SIZE/128.0)**2 #count of predicted mask pixles<threshold, predict as empty mask image\n #MASK_THRESHOLD = 0.22\n #logit = torch.sigmoid(torch.from_numpy(logit)).view(n, -1)\n #pred = (logit>MASK_THRESHOLD).long()\n #pred[pred.sum(dim=1) < EMPTY_THRESHOLD, ] = 0 #bug here, found it, the bug is input shape is (256, 256) not (16,256,256)\n logit = sigmoid(logit)#.reshape(n, -1)\n pred = (logit>MASK_THRESHOLD).astype(np.int)\n if pred.sum() < EMPTY_THRESHOLD:\n return np.zeros(pred.shape).astype(np.int)\n else:\n return pred", "def mask_and_fit(mask, binary_warped, flag):\n img = cv2.bitwise_and(binary_warped, binary_warped, mask=mask)\n x, y = extract_pixels(img)\n fit, foundFlag, confidence_index = check_and_fit(x, y, flag)\n return fit, foundFlag, confidence_index", "def _overlay_ref_fit(self, img, mean, fit, off=25):\n\n def plus(img, x, y, val=0, r=10):\n img[x - 1:x, y - r:y + r], img[x - r:x + r, y - 1:y] = val, val\n return img\n\n if len(self.refzone) != 4:\n return img\n\n centers = [(self.ref_rc[0] - off, self.ref_rc[1] - off),\n (self.ref_rc[0] - off, self.ref_rc[1] + off),\n (self.ref_rc[0] + off, self.ref_rc[1] - off),\n (self.ref_rc[0] + off, self.ref_rc[1] + off)]\n\n img = plus(img, self.ref_rc[0], self.ref_rc[1], val=150, r=15) # final mean offset\n img = plus(img, self.ref_rc[0] + mean[0], self.ref_rc[1] + mean[1], val=0)\n for [x0, x1, y0, y1], [x_off, y_off], (cx, cy) in zip(self.refzone, fit, centers):\n img = plus(img, cx, cy, val=120, r=15) # panel fitted\n img = plus(img, cx + x_off, cy + y_off, val=0) # panel reference\n img = plus(img, x0, y0, val=150) # expected reference\n img = plus(img, x1, y1, val=150) #\n img = plus(img, x0 + x_off, y0 + y_off, val=0) # actual fitted\n img = plus(img, x1 + x_off, y1 + y_off, val=0) #\n\n return img", "def histogram_stretching(img):\n\n img_copy = np.copy(img)\n\n img_min = img_copy.min()\n img_max = img_copy.max()\n\n if img_min == img_max:\n return None\n\n img_copy = (img_copy-img_min)/(img_max-img_min) * 255\n\n return img_copy", "def create_image_fits(base_dir,fits_img,outroot, bin_file, temp_file):\n bins, min_x, max_x, min_y, max_y = read_in(base_dir+'/'+bin_file,base_dir+'/'+temp_file)\n # Create image array\n x_len = int(max_x-min_x)\n y_len = int(max_y-min_y)\n temp_array = np.zeros((x_len,y_len))\n percentage_array = np.zeros((x_len,y_len))\n for bin in bins:\n for pixel in bin.pixels:\n #print(bin.temp)\n try:\n temp_array[int(pixel.pix_x-1),int(pixel.pix_y-1)] = int(bin.temp)\n percentage_array[int(pixel.pix_x-1),int(pixel.pix_y-1)] = float(bin.percentage)\n except:\n #print(bin.temp)\n pass\n # Copy header\n 
fits_ = fits.open(base_dir+'/'+fits_img)\n hdr = header=fits_[0].header\n # Change image\n hdu = fits.PrimaryHDU(temp_array)\n hdul = fits.HDUList([hdu])\n fits.writeto(base_dir+'/component_bins.fits', temp_array.T, hdr, overwrite=True)\n fits.writeto(base_dir+'/percentage_bins.fits', percentage_array.T, hdr, overwrite=True)", "def equalize_hist(input):\n return np.float32(skimage.exposure.equalize_hist(input.numpy()))", "def SetInput(self, input: 'itkHistogramF') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFF_SetInput(self, input)", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def find_histogram(vol, hist, mini, maxi, mask, use_mask):\n validsize = 0\n hist = np.zeros(hist.size, dtype=int)\n if mini == maxi:\n return -1\n\n fA = float(hist.size)/(maxi-mini)\n fB = (float(hist.size)*float(-mini)) / (maxi-mini)\n\n if use_mask:\n a = vol[mask > 0.5].flatten()\n else:\n a = vol.flatten()\n\n a = (a*fA + fB).astype(int)\n h = hist.size - 1\n\n for i in np.arange(a.size):\n hist[max(0, min(a[i], h))] += 1\n validsize += 1\n\n return hist, validsize", "def get_histogram(folder_name, image_name, save_location):\n print(\"Getting histogram for:\" + str(folder_name) + '/' + str(image_name))\n image = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.xlabel('Pixel Intensity')\n plt.ylabel('Number of pixels')\n plt.title('Histogram of normalised reference image. 
Overnight2')\n plt.savefig(save_location + 'histogram.png')\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()", "def _histogram_equalize_image(image, hist_orig):\n cum_hist = np.cumsum(hist_orig)\n cum_hist = (cum_hist * 255) / cum_hist[-1]\n\n image = np.interp(image, np.linspace(0, 1, 256), np.round(cum_hist))\n\n return utils.normalize_image(image)", "def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist", "def histogram_equalize(im_orig):\n\n color_flag = False\n image = im_orig\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n image *= NORMALIZE\n hist_orig, bins = np.histogram(image, range(BINS))\n hist_cum = np.cumsum(hist_orig) #cumulative distribution function\n\n cum = ((hist_cum - hist_cum.min()) / ( hist_cum.max() - hist_cum.min())) * NORMALIZE\n\n im_eq = cum[image.astype(np.uint8)]\n\n hist_eq, bins = np.histogram(im_eq, range(BINS)) #before getting back to float64 does the histogram)\n\n im_eq /= NORMALIZE\n im_eq = im_eq.astype(np.float64)\n\n\n if color_flag:\n y_im[:, :, 0] = im_eq\n im_eq = yiq2rgb(y_im)\n\n im_eq = im_eq.clip(0,1)\n return [im_eq, hist_orig, hist_eq]", "def Histogram_Matching(inImFile, outImFile, refImFile,\n number_of_histogram_levels=1024,\n number_of_match_points=7,\n threshold_at_mean_intensity=False):\n inputIm = sitk.ReadImage(inImFile)\n referenceIm = sitk.ReadImage(refImFile)\n histMatchingFilter = sitk.HistogramMatchingImageFilter()\n histMatchingFilter.SetNumberOfHistogramLevels(number_of_histogram_levels)\n histMatchingFilter.SetNumberOfMatchPoints(number_of_match_points)\n histMatchingFilter.SetThresholdAtMeanIntensity(threshold_at_mean_intensity)\n outputIm = histMatchingFilter.Execute(inputIm, referenceIm)\n if outImFile is not None:\n sitk.WriteImage(outputIm, outImFile, True)\n return outputIm", "def analyze_index(index_array, mask, histplot=False, bins=100, min_bin=0, max_bin=1, label=\"default\"):\n params.device += 1\n\n debug = params.debug\n params.debug = None\n analysis_image = None\n\n if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:\n fatal_error(\"Mask should be a binary image of 0 and nonzero values.\")\n\n if len(np.shape(index_array.array_data)) > 2:\n fatal_error(\"index_array data should be a grayscale image.\")\n\n # Mask data and collect statistics about pixels within the masked image\n masked_array = index_array.array_data[np.where(mask > 0)]\n masked_array = masked_array[np.isfinite(masked_array)]\n\n index_mean = np.nanmean(masked_array)\n index_median = np.nanmedian(masked_array)\n index_std = np.nanstd(masked_array)\n\n # Set starting point and max bin values\n maxval = max_bin\n b = min_bin\n\n # Calculate observed min and max pixel values of the masked array\n observed_max = np.nanmax(masked_array)\n observed_min = np.nanmin(masked_array)\n\n # Auto calculate max_bin if set\n if type(max_bin) 
is str and (max_bin.upper() == \"AUTO\"):\n maxval = float(round(observed_max, 8)) # Auto bins will detect maxval to use for calculating labels/bins\n if type(min_bin) is str and (min_bin.upper() == \"AUTO\"):\n b = float(round(observed_min, 8)) # If bin_min is auto then overwrite starting value\n\n # Print a warning if observed min/max outside user defined range\n if observed_max > maxval or observed_min < b:\n print(\"WARNING!!! The observed range of pixel values in your masked index provided is [\" + str(observed_min) +\n \", \" + str(observed_max) + \"] but the user defined range of bins for pixel frequencies is [\" + str(b) +\n \", \" + str(maxval) + \"]. Adjust min_bin and max_bin in order to avoid cutting off data being collected.\")\n\n # Calculate histogram\n hist_val = [float(i[0]) for i in cv2.calcHist([masked_array.astype(np.float32)], [0], None, [bins], [b, maxval])]\n bin_width = (maxval - b) / float(bins)\n bin_labels = [float(b)]\n plotting_labels = [float(b)]\n for i in range(bins - 1):\n b += bin_width\n bin_labels.append(b)\n plotting_labels.append(round(b, 2))\n\n # Make hist percentage for plotting\n pixels = cv2.countNonZero(mask)\n hist_percent = [(p / float(pixels)) * 100 for p in hist_val]\n\n params.debug = debug\n\n if histplot is True:\n dataset = pd.DataFrame({'Index Reflectance': bin_labels,\n 'Proportion of pixels (%)': hist_percent})\n fig_hist = (ggplot(data=dataset,\n mapping=aes(x='Index Reflectance',\n y='Proportion of pixels (%)'))\n + geom_line(color='red')\n + scale_x_continuous(breaks=bin_labels, labels=plotting_labels))\n analysis_image = fig_hist\n if params.debug == 'print':\n fig_hist.save(os.path.join(params.debug_outdir,\n str(params.device) + index_array.array_type + \"hist.png\"), verbose=False)\n elif params.debug == 'plot':\n print(fig_hist)\n\n outputs.add_observation(sample=label, variable='mean_' + index_array.array_type,\n trait='Average ' + index_array.array_type + ' reflectance',\n method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,\n value=float(index_mean), label='none')\n\n outputs.add_observation(sample=label, variable='med_' + index_array.array_type,\n trait='Median ' + index_array.array_type + ' reflectance',\n method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,\n value=float(index_median), label='none')\n\n outputs.add_observation(sample=label, variable='std_' + index_array.array_type,\n trait='Standard deviation ' + index_array.array_type + ' reflectance',\n method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,\n value=float(index_std), label='none')\n\n outputs.add_observation(sample=label, variable='index_frequencies_' + index_array.array_type,\n trait='index frequencies', method='plantcv.plantcv.analyze_index', scale='frequency',\n datatype=list, value=hist_percent, label=bin_labels)\n\n if params.debug == \"plot\":\n plot_image(masked_array)\n elif params.debug == \"print\":\n print_image(img=masked_array, filename=os.path.join(params.debug_outdir, str(params.device) +\n index_array.array_type + \".png\"))\n # Store images\n outputs.images.append(analysis_image)\n\n return analysis_image", "def create_fixed_hist(self):\n hist = cv2.calcHist([self.obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n self.hist = cv2.normalize(hist).flatten()\n print self.hist", "def adaptive_hist(image):\n mask = np.zeros(image.shape[:2], np.uint8)\n # spatially weighted by Gaussian distribtuion?\n mask = 
cv2.ellipse(mask, (image.shape[1] // 2,image.shape[0] // 2),\n (image.shape[1] // 2,image.shape[0] // 2), 0, 0, 360, 255, -1)\n\n # RGB color histogram\n hist1 = cv2.calcHist([image], [0], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([image], [1], mask, [16], [0, 256]).reshape(1, -1)\n hist3 = cv2.calcHist([image], [2], mask, [16], [0, 256]).reshape(1, -1)\n rgb_hist = np.concatenate((hist1, hist2, hist3), axis=1)\n cv2.normalize(rgb_hist, rgb_hist)\n\n # HSV color histogram\n img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n hist1 = cv2.calcHist([img_hsv], [0], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([img_hsv], [1], mask, [16], [0, 256]).reshape(1, -1)\n hsv_hist = np.concatenate((hist1, hist2), axis=1)\n cv2.normalize(hsv_hist, hsv_hist)\n\n # YCrCb color histogram\n img_YCrCb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)\n hist1 = cv2.calcHist([img_YCrCb], [1], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([img_YCrCb], [2], mask, [16], [0, 256]).reshape(1, -1)\n YCrCb_hist = np.concatenate((hist1, hist2), axis=1)\n cv2.normalize(YCrCb_hist, YCrCb_hist)\n\n # Lab color histogram\n img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n hist1 = cv2.calcHist([img_lab], [1], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([img_lab], [2], mask, [16], [0, 256]).reshape(1, -1)\n lab_hist = np.concatenate((hist1, hist2), axis=1)\n cv2.normalize(lab_hist, lab_hist)\n\n # Hog\n #image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #image_gray = cv2.resize(image_gray, (200,200))\n #hog_hist = hog(image_gray, orientations=8, block_norm = 'L2-Hys', pixels_per_cell=(50,50), cells_per_block=(1,1), visualize=False).reshape(1, -1)\n #cv2.normalize(hog_hist, hog_hist)\n\n # type?\n #type_hist = np.zeros(8).reshape(1,8) + 0.5\n #type_hist[0, int(image_path[-5])] = 1\n #cv2.normalize(type_hist, type_hist)\n\n #thist = np.transpose(np.concatenate((3 * rgb_hist, hsv_hist, YCrCb_hist, lab_hist, hog_hist), axis=1))\n thist = np.transpose(np.concatenate((3 * rgb_hist, hsv_hist, YCrCb_hist, lab_hist), axis=1))\n thist = thist / sum(thist)\n\n return np.transpose(thist)[0]", "def Adjust_Data(img,mask,feature_dict, normalize):\n ## Normalize image\n if normalize:\n img = Normalize_Image(img)\n\n ## Assume mask shape has 4 dimensions - mask is (batch, x, y, color-channel)\n ## color-channels are redundant, so just choose the first. \n mask = mask[:,:,:,0]\n \n ## Image_datagen performs interpolation when rotating, resulting in non-integer\n ## mask values. Round these back to integers before expanding the mask. 
\n mask = mask.round() \n mask = Expand_Mask(mask, feature_dict)\n #print(mask.shape, np.unique(mask, axis = 0))\n return (img,mask)", "def final_mask(path, output_mask, percentage=0.5):\n with fits.open(path, \"readonly\") as temp_mask:\n mask_data = temp_mask[0].data\n mask_header = temp_mask[0].header\n mask_data[mask_data >= percentage] = 1\n mask_data[mask_data < percentage] = 0\n fits.writeto(output_mask, mask_data, mask_header, clobber=True)", "def _histogram(image,\n min,\n max,\n bins):\n\n return numpy.histogram(image, bins, (min, max))[0]", "def hbond_frequency(mask):\n return mask.sum(axis=0)/len(mask)", "def _get_reference_fit(self, img):\n bw_img = 255 * (img >= self.contrast)\n fit = [center_on_box(bw_img, self.radius, self.min_ref, *ref) for ref in self.refzone]\n meanfit = num.mean(num.ma.masked_array(fit, fit == -9999), axis=0).astype('i')\n if meanfit[0] is num.ma.masked:\n raise StandardError('At least one reference box match required')\n\n return meanfit, fit", "def _compute_histogram(self, x, momentum):\n num_bins = self.histogram.size(0)\n x_detached = x.detach()\n self.bin_width = (self._max_val - self._min_val) / (num_bins - 1)\n lo = torch.floor((x_detached - self._min_val) / self.bin_width).long()\n hi = (lo + 1).clamp(min=0, max=num_bins - 1)\n hist = x.new_zeros(num_bins)\n alpha = (\n 1.0\n - (x_detached - self._min_val - lo.float() * self.bin_width)\n / self.bin_width\n )\n hist.index_add_(0, lo, alpha)\n hist.index_add_(0, hi, 1.0 - alpha)\n hist = hist / (hist.sum() + 1e-6)\n self.histogram = (1.0 - momentum) * self.histogram + momentum * hist" ]
[ "0.5717073", "0.55807126", "0.5528441", "0.55023885", "0.54681766", "0.54579365", "0.54448426", "0.54230475", "0.54140556", "0.5352749", "0.53307045", "0.5266392", "0.52623886", "0.5244729", "0.52365315", "0.52273583", "0.52006847", "0.52000165", "0.5193751", "0.5170942", "0.5166152", "0.515521", "0.5148498", "0.5131553", "0.513034", "0.5128944", "0.51003295", "0.509646", "0.5091102", "0.50773436" ]
0.57911646
0
Find the position of the common line in 3D. The formula is (RB^T zhat) cross (RA^T zhat). Returns phi, theta of the common line in degrees; theta is always < 90. Note that you don't need to enter psi's; they are irrelevant
def common_line_in3D(phiA,thetaA,phiB,thetaB): from math import pi, sqrt, cos, sin, asin, atan2 piOver=pi/180.0; ph1 = phiA*piOver; th1 = thetaA*piOver; ph2 = phiB*piOver; th2 = thetaB*piOver; #nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ; #ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ; #nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR); nx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2) ny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2) nz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2) norm = nx*nx + ny*ny + nz*nz if norm < 1e-5: #print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB return 0.0, 0.0 if nz<0: nx=-nx; ny=-ny; nz=-nz; #thetaCom = asin(nz/sqrt(norm)) phiCom = asin(nz/sqrt(norm)) #phiCom = atan2(ny,nx) thetaCom = atan2(ny, nx) return phiCom*180.0/pi , thetaCom*180.0/pi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def theta_finder(theta, point_a, point_b, point_c, point_c_new):\n x, y, z = parametrized_circle(point_a, point_b, point_c, theta)\n residual = (x - point_c_new[0])**2 + (y - point_c_new[1])**2 + (z - point_c_new[2])**2\n return residual", "def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi", "def CR(phi):\n return (np.kron(P0,s0) + np.kron(P1,R(phi)))", "def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)", "def get_polar_coordinates(cup_position, bot_position):\n\n distance_x = cup_position[0] - bot_position[0]\n distance_y = cup_position[1] - bot_position[1]\n\n r = math.hypot(distance_x, distance_y)\n theta = math.degrees(math.atan(distance_y/distance_x))\n\n return r, theta", "def R3(theta):\n\n DCM = np.array([[np.cos(theta), np.sin(theta), 0], \n [-np.sin(theta), np.cos(theta), 0], \n [0, 0, 1]])\n\n return DCM", "def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]", "def intersection_right(theta):\n a_ccs_rsm_tran = hom_translation_matrix(\n t_x=0.139807669447128, t_y=-0.0549998406976098, t_z=-0.051)\n a_ccs_rsm_rot = hom_rotation(z_axis_rotation_matrix(radians(-15.0)))\n a_mcs_2_joint = hom_rotation(z_axis_rotation_matrix(theta))\n a_mcs_2_sp_2_1 = hom_translation_matrix(\n t_x=0.085, t_y=0, t_z=-0.0245)\n\n a_ccs_sp_2_1 = a_ccs_rsm_tran @ a_ccs_rsm_rot @ a_mcs_2_joint @ a_mcs_2_sp_2_1\n return get_translation(a_ccs_sp_2_1)", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def pol2cart(theta, rho, z=None):\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n\n if z is None:\n return x, y\n else:\n return x, y, z", "def calc_torsion_phi(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n paC = prev_res.get_atom('C')\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n return AtomMath.calc_torsion_angle(paC, aN, aCA, aC)", "def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = 
np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle", "def _get_pt_theta(self, R, C, base_rad_m=0.2):\n #rospy.loginfo(\"Received R: %s, C: %s\" % (R, C))\n G = C - R\n #rospy.loginfo(\"Calculated G = %s\" % G)\n G_mag = np.linalg.norm(G)\n #rospy.loginfo(\"Calculated G_mag = %s\" % G_mag)\n\n # magnitude of distance for goal is magnitude of distance\n # between points - radius of base\n G_p_mag = G_mag - base_rad_m \n #rospy.loginfo(\"Then G_p_mag = %s\" % G_p_mag)\n gx, gy = G[0,0], G[1, 0]\n #rospy.loginfo(\"gx is %s, gy is %s\" % (gx, gy))\n theta = np.arctan(gy/gx)\n # Handle cases where tangent wraps around\n if gx < 0.0:\n theta += np.pi\n #rospy.loginfo(\"Then theta is %s radians (%s degrees)\" % (theta, np.rad2deg(theta)))\n G_p = G_p_mag * (np.array([np.cos(theta), np.sin(theta)]).reshape(-1, 1))\n #rospy.loginfo(\"G_p is %s\" % G_p)\n pt = R + G_p\n #rospy.loginfo(\"Finally, pt is %s\" % pt)\n #rospy.loginfo(\"Determined pt = %s and theta = %s\" % (pt, theta))\n\n return pt, theta", "def theta_phi(Collimator_square, sample_point):\n p1,p2,p3,p4=Collimator_square\n\n points = np.array([sample_point-p1, sample_point-p2, sample_point-p3, sample_point-p4])\n points=points.transpose(1,0,2) #shape: (pointsNum,4,3)\n\n theta = np.arctan2(points[:, :, 0],points[:, :, 1] )\n\n norm_x_y=np.sqrt(points[:, :, 0]**2+points[:, :, 1]**2)\n phi = np.arctan2(norm_x_y, points[:, :, 2])\n\n return theta, phi", "def rotate(prime_pos, theta, phi):\n light_dir = np.array([0, 0, 1])\n origin_prime = np.array(prime_pos)\n light_dir = phi_rot(light_dir, phi)\n light_dir = theta_rot(light_dir, theta)\n # origin = phi_rot(origin_prime, phi)\n origin = theta_rot(origin_prime, theta)\n return origin, light_dir", "def calculate_theta_vals(self) -> None:\n A = np.zeros(self.num_points) # Inappropriate names, but they mirror Knuth's notation.\n B = np.zeros(self.num_points)\n C = np.zeros(self.num_points)\n D = np.zeros(self.num_points)\n R = np.zeros(self.num_points)\n\n # Calculate the entries of the five vectors.\n # Skip first and last point if path is non-cyclic.\n point_ind = range(self.num_points) if self.is_cyclic else range(1, self.num_points - 1)\n for i in point_ind:\n z_h = self.points[i - 1]\n z_i = self.points[i]\n z_j = self.points[(i + 1) % self.num_points]\n\n A[i] = z_h.alpha / (z_i.beta ** 2 * z_h.d_val)\n B[i] = (3 - z_h.alpha) / (z_i.beta ** 2 * z_h.d_val)\n C[i] = (3 - z_j.beta) / (z_i.alpha ** 2 * z_i.d_val)\n D[i] = z_j.beta / (z_i.alpha ** 2 * z_i.d_val)\n R[i] = -B[i] * z_i.psi - D[i] * z_j.psi\n\n # Set up matrix M such that the soln. 
Mx = R are the theta values.\n M = np.zeros((self.num_points, self.num_points))\n for i in range(self.num_points):\n # Fill i-th row of M\n M[i][i - 1] = A[i]\n M[i][i] = B[i] + C[i]\n M[i][(i + 1) % self.num_points] = D[i]\n\n # Special formulas for first and last rows of M with non-cyclic paths.\n if not self.is_cyclic:\n # First row of M\n alpha_0 = self.points[0].alpha\n beta_1 = self.points[1].beta\n xi_0 = (alpha_0 ** 2 * self.begin_curl) / beta_1 ** 2\n M[0][0] = alpha_0 * xi_0 + 3 - beta_1\n M[0][1] = (3 - alpha_0) * xi_0 + beta_1\n R[0] = -((3 - alpha_0) * xi_0 + beta_1) * self.points[1].psi\n # Last row of M\n alpha_n_1 = self.points[-2].alpha\n beta_n = self.points[-1].beta\n xi_n = (beta_n ** 2 * self.end_curl) / alpha_n_1 ** 2\n M[-1][-2] = (3 - beta_n) * xi_n + alpha_n_1\n M[-1][-1] = (beta_n * xi_n + 3 - alpha_n_1)\n R[-1] = 0\n\n # Solve for theta values.\n thetas = np.linalg.solve(M, R)\n for i, point in enumerate(self.points):\n point.theta = thetas[i]", "def sph2car(r, theta, phi):\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n\n return x, y, z", "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )", "def get_x_y_z(drone, p, q, r):\n num_cameras = 2\n camera_constants = [0,math.pi/2]\n rads = np.zeros(num_cameras)\n phis = np.zeros(num_cameras)\n d = np.zeros(num_cameras)\n theta = np.zeros(num_cameras)\n Hs = np.zeros(num_cameras)\n s = 12\n HFOV = math.pi/4\n VFOV = 5*math.pi/36\n HPIX = 1280\n VPIX = 720\n #loop one, where we increment over camera number, and\n # get new information\n\n cent = calculate_centroid(p,q,r)\n for camera_num in range(num_cameras):\n\n A,B = find_a_and_b(p[camera_num],q[camera_num],r[camera_num],cent[camera_num])\n a = find_a(A,B)\n d_in = find_inner_d(a, s)\n angle_c = find_angle_c(a)\n alpha = find_alpha(HFOV, HPIX, A)\n w = find_w(angle_c, s)\n d_out = find_outer_d(w,alpha,a)\n pointy_front = is_point_front(r[camera_num],q[camera_num],p[camera_num],cent[camera_num])\n d[camera_num] = find_d(d_in,d_out,pointy_front)\n theta[camera_num] = find_theta(angle_c,A,B,camera_constants[camera_num])\n k = find_k(drone[camera_num], cent[camera_num])\n angle_k = find_angle_k(k, HFOV, HPIX)\n phi = find_phi(theta[camera_num], angle_k)\n rad = find_r(d[camera_num], angle_k)\n phis[camera_num] = phi\n rads[camera_num] = rad\n\n # end of first loop\n\n cosphis = np.cos(phis)\n sinphis = np.sin(phis)\n big_matrix = np.column_stack((cosphis,sinphis))\n points = np.zeros((int(num_cameras*(num_cameras-1)/2),2))\n i = 0\n for pair in itertools.combinations(range(num_cameras), 2):\n matrix_a = np.vstack((big_matrix[pair[0]],big_matrix[pair[1]]))\n vec_b = np.hstack((rads[pair[0]],rads[pair[1]]))\n point = np.linalg.solve(matrix_a, vec_b)\n points[i] = point\n i += 1\n drone_pos = np.mean(points,axis=0)\n\n # start of third loop\n for camera_num in range(num_cameras):\n d_prime = find_d_prime(d[camera_num], theta[camera_num], drone_pos)\n P,Q,M,N = find_P_Q_M_N(p[camera_num],q[camera_num],r[camera_num])\n h = find_h(d[camera_num],P,Q,M,N)\n angle_4 = find_angle_4(h,d[camera_num])\n Y = find_Y(drone[camera_num], cent[camera_num])\n angle_5 = find_angle_5(Y, VFOV, VPIX)\n angle_6 = angle_5 - angle_4\n h_prime = find_h_prime(d_prime, angle_6)\n Hs[camera_num] = h + h_prime\n drone_h = np.mean(H)\n return np.append(drone_pos,drone_h)", "def 
find_inplane_to_match(phiA,thetaA,phiB,thetaB,psiA=0,psiB=0):\n\t#from math import pi, sqrt, cos, acos, sin\n\n\tRA = Transform({'type': 'spider', 'phi': phiA, 'theta': thetaA, 'psi': psiA})\n\tRB = Transform({'type': 'spider', 'phi': phiB, 'theta': thetaB, 'psi': psiB})\n\tRBT = RB.transpose()\n\tRABT = RA * RBT\n\n\tRABTeuler = RABT.get_rotation('spider')\n\tRABTphi = RABTeuler['phi']\n\tRABTtheta = RABTeuler['theta']\n\tRABTpsi = RABTeuler['psi']\n\n\t#deg_to_rad = pi/180.0\n\t#thetaAR = thetaA*deg_to_rad\n\t#thetaBR = thetaB*deg_to_rad\n\t#phiAR = phiA*deg_to_rad\n\t#phiBR = phiB *deg_to_rad\n\n\t#d12=cos(thetaAR)*cos(thetaBR) + sin(thetaAR)*sin(thetaBR)*cos(phiAR-phiBR)\n\treturn (-RABTpsi-RABTphi),RABTtheta # 180.0*acos(d12)/pi;", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def determine_in_plane_angle(self, qxy, qz=0.0, theta_incident=0.0):\n \n k = self.get_k()\n if theta_incident==None:\n # Use internal value\n theta_incident = self.theta_incident\n theta_incident_rad = np.radians(theta_incident)\n \n from scipy.optimize import fsolve\n \n def equations(p, qxy=qxy, qz=qz, theta_incident=theta_incident, k=k):\n \n # The variable we are fitting for\n omega_rad, = p\n \n # Non-fit values: qxy, qz, k, theta_incident, k\n \n return ( (qxy*cos(omega_rad))**2 + (qxy*sin(omega_rad)+k*cos(theta_incident_rad))**2 + (qz-k*sin(theta_incident_rad))**2 - k**2 )\n\n \n omega_rad, = fsolve(equations, ( np.radians(5.0) ) )\n #print( 'omega_rad = %.2f (err = %.4f)' % ( omega_rad, equations((omega_rad, )) 
) )\n \n omega = abs( np.degrees(omega_rad) )\n #print( 'omega = %.2f (err = %.4f)' % ( omega, equations((omega_rad, )) ) )\n \n \n return omega", "def Distance2RRhoPhi(r1,r2,r3):\n \n # Calculate the square-distances of \n # each pair of atoms.\n r1 = np.array(r1)\n r2 = np.array(r2) \n r3 = np.array(r3)\n \n rr1 = r1*r1\n rr2 = r2*r2\n rr3 = r3*r3\n \n return TriatomicRadialPolar.DistanceSquared2RRhoPhi(rr1,rr2,rr3)", "def rotxaxis(ya, za, angle):\n\n y = ya * math.cos(angle) - za * math.sin(angle) \n z = ya * math.sin(angle) + za * math.cos(angle)\n \n return y, z", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def getPhi(mass,resonance):\n return numpy.arctan((resonance.r0*resonance.w0)/(mass**2-resonance.w0**2)) #need to make this arccotan? invert args", "def get_angle(a: Keypoint, b: Keypoint, c: Keypoint) -> float:\n # get a vector with origin in (0,0) from points a and b by substracting Point a from Point b\n vector_a = keypoint_to_vector(a, b)\n vector_c = keypoint_to_vector(c, b)\n # https://de.wikipedia.org/wiki/Skalarprodukt => winkel phi = arccos(...)\n phi = np.arccos(np.dot(vector_a, vector_c) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_c)))\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return phi if angle_left_opening else -phi", "def phitheta(loc):\n x = loc[0]\n y = loc[1]\n z = loc[2]\n r = sqrt(x**2 + y**2 + z**2)\n theta = arcsin(z/r)\n phi = arctan2(y,x)\n return(phi, theta)" ]
[ "0.6163923", "0.6056872", "0.6046275", "0.60347825", "0.59408677", "0.5916736", "0.58936423", "0.5829221", "0.58063835", "0.57062423", "0.57015944", "0.56956786", "0.5686689", "0.56826967", "0.56771237", "0.5673097", "0.56674886", "0.56646484", "0.56586415", "0.56585634", "0.5654848", "0.56488967", "0.5643867", "0.5610945", "0.5607379", "0.5601729", "0.55632824", "0.55601156", "0.5534972", "0.55169934" ]
0.68433595
0
Combine 2D alignment parameters, including mirror
def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2): t1 = Transform({"type":"2D","alpha":alpha1,"tx":sx1,"ty":sy1,"mirror":mirror1,"scale":1.0}) t2 = Transform({"type":"2D","alpha":alpha2,"tx":sx2,"ty":sy2,"mirror":mirror2,"scale":1.0}) tt = t2*t1 d = tt.get_params("2D") return d[ "alpha" ], d[ "tx" ], d[ "ty" ], d[ "mirror" ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def image_align(first_image, second_image):\r\n\r\n high_diff = (second_image.shape[0] - first_image.shape[0]) // 2\r\n width_diff = (second_image.shape[1] - first_image.shape[1]) // 2\r\n\r\n align_image = second_image[high_diff: high_diff + first_image.shape[0],\r\n width_diff: width_diff + first_image.shape[1],\r\n :]\r\n\r\n\r\n assert align_image.shape == first_image.shape\r\n\r\n return align_image", "def test_align(self):\n al = align(self.amp1, self.amp2).m\n\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n 
self.assertAlmostEqual(al.vert.mean(axis=0)[0], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[1], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[2], 0, delta=TestAlign.DELTA)", "def new_mirror(self,alongx,alongy):\n Knew = K.clone()\n if alongx:\n Knew[0,2] = size[0]-Knew[0,2]\n if alongy:\n Knew[1,2] = size[1]-Knew[1,2]\n return CameraInfo(self.size,Knew,self.dist)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def affine_align(x, y, p1, p2, g, s):\n #Create M, Ix, and Iy as Y x X matrices of 0's\n M = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Ix = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Iy = [[0]*(len(x)+1) for i in range(len(y)+1)]\n #Set up initial values for Ix and Iy\n #M infs along both axes\n for i in range(1, len(y)+1):\n M[i][0] = -math.inf\n for j in range(1, len(x)+1):\n M[0][j] = -math.inf\n #Ix: Aligning X with gap, horizontal move, infs along top row\n for i in range(0, len(y)+1):\n Ix[i][0] = -math.inf\n #Gap penalties along left column\n for j in range(1, len(x)+1):\n Ix[0][j] = -g if Ix[0][j-1] == -math.inf else Ix[0][j-1] - s\n #Iy: Aligning Y with gap, vertical move, infs along left column\n for j in range(0, len(x)+1):\n Iy[0][j] = -math.inf\n #Gap penalties along top row\n for i in range(1, len(y)+1):\n Iy[i][0] = -g if Iy[i-1][0] == -math.inf else Iy[i-1][0] - s\n #Populate remaining cells\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n M[i][j] = max(M[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Ix[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Iy[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2))\n Ix[i][j] = max(M[i][j-1] - g,\n Ix[i][j-1] - s)\n Iy[i][j] = max(M[i-1][j] - g,\n Iy[i-1][j] - s)\n #TRACEBACK\n x_ret=\"\"; y_ret=\"\"\n i = len(y); j = len(x)\n #Determine start matrix\n align_scores = (M[i][j], Iy[i][j], Ix[i][j])\n matrix_idx = align_scores.index(max(align_scores))\n #matrix_key will track the current matrix through the traceback\n matrix_key = [\"M\", \"Iy\", \"Ix\"][matrix_idx]\n while i > 0 and j > 0:\n #From M: Check diagonal moves back to all three matrices, align characters\n if matrix_key == \"M\":\n if M[i][j] == M[i-1][j-1] + p1 or M[i][j] == M[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"M\"\n elif M[i][j] == Iy[i-1][j-1] + p1 or M[i][j] == Iy[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Iy\"\n elif M[i][j] == Ix[i-1][j-1] + p1 or M[i][j] == Ix[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Ix\"\n #From Iy: Check vertical move to Iy and M, align y character with x gap\n elif matrix_key == \"Iy\":\n if Iy[i][j] == M[i-1][j] - g:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"M\"\n elif Iy[i][j] == Iy[i-1][j] - s:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"Iy\"\n #From Ix: Check horizontal move to Ix and M, align x character with y gap\n elif matrix_key == \"Ix\":\n if Ix[i][j] == M[i][j-1] - g:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"M\"\n elif 
Ix[i][j] == Ix[i][j-1] - s:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"Ix\"\n #Finish sequence if edge was reached\n #i>0 means mach remaining characters in y with gaps in x\n if i > 0:\n x_ret = (\"_\"*i) + x_ret\n y_ret = y[0:i] + y_ret\n #j>0 means mach remaining characters in x with gaps in y\n if j > 0:\n x_ret = x[0:j] + x_ret\n y_ret = (\"_\"*j) + y_ret\n #Return alinged strings\n return (x_ret, y_ret)", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def alignPairShapes(s1,s2,weights):\n\n\n s1=np.asarray(s1)\n s2=np.asarray(s2)\n \n x1k=s1[:,0]\n y1k=s1[:,1]\n x2k=s2[:,0]\n y2k=s2[:,1]\n\n X1=sum(x1k*weights) \n X2=sum(x2k*weights)\n\n Y1=sum(y1k*weights)\n Y2=sum(y2k*weights)\n\n Z=sum(weights*(pow(x2k,2)+pow(y2k,2)))\n\n W=sum(weights)\n\n C1=sum(weights*(x1k*x2k+y1k*y2k))\n\n C2=sum(weights*(y1k*x2k-x1k*y2k))\n \n a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]])\n b=np.asarray([X1,Y1,C1,C2])\n\n x=np.linalg.solve(a,b)\n\n ax=x[0]\n ay=x[1]\n tx=x[2]\n ty=x[3]\n return ax,ay,tx,ty", "def _align(self):\n\n shape = np.shape(self.x)\n\n # Get angle of direction (cbi: center beam index)\n # NOTE: This implementation seems to be unstable, because the shot with the center beam index can be NaN\n # cbi = np.median(np.arange(len(self.x[0, :]))).astype(int)\n # vec1 = [self.x[0, cbi], self.y[0, cbi], 0.0]\n # vec2 = [self.x[-1, cbi], self.y[-1, cbi], 0.0]\n\n # Alternative implementation with mean over all entries within the line.\n # -> should be a good approximation of the line center\n # NOTE: 2019-05-30: Relaxed the criterion even further (mean of first and last 10 scan lines)\n vec1 = [np.nanmedian(self.x[0:10, :]), np.nanmedian(self.y[0:10, :]), 0.0]\n vec2 = [np.nanmedian(self.x[-11:-1, :]), np.nanmedian(self.y[-11:-1, :]), 0.0]\n angle = -1.0*np.arctan((vec2[1]-vec1[1])/(vec2[0]-vec1[0]))\n\n # validity check -> Do not rotate if angle is nan\n if np.isnan(angle):\n return\n\n # Get center point\n xc = np.nanmedian(self.x)\n yc = np.nanmedian(self.y)\n\n # Reform points\n points = [self.x.flatten()-xc, self.y.flatten()-yc]\n\n # Execute the rotation\n rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n points_rotated = rot_matrix.dot(points)\n self.x = np.reshape(points_rotated[0, :], shape)\n self.y = np.reshape(points_rotated[1, :], shape)\n\n # Save conversion parameters for reuse\n self._align_parameters = {'center_point': (xc, yc),\n 'angle': angle,\n 'rotation_matrix': rot_matrix}", "def alignChannels(red, green, blue):\n trans = 30\n h = len(red)\n w = len(red[0])\n alignGreenX, 
alignGreenY = align2_new(red, green, trans)\n alignBlueX, alignBlueY = align2_new(red, blue, trans)\n result = np.zeros((h + trans*2, w + trans*2, 3))\n result[trans:h+trans, trans:w+trans, 0] = red\n result[trans+alignGreenY:h+trans+alignGreenY, trans + alignGreenX:w+trans+alignGreenX, 1] = green\n result[trans+alignBlueY:h+trans+alignBlueY, trans + alignBlueX:w+trans+alignBlueX, 2] = blue\n \n return result", "def align_reconstruction_to_pdr(reconstruction, data):\n if reconstruction.alignment.aligned:\n return reconstruction\n\n if not data.pdr_shots_exist():\n return reconstruction\n\n pdr_shots_dict = data.load_pdr_shots()\n\n X, Xp = [], []\n onplane, verticals = [], []\n for shot_id in reconstruction.shots.keys():\n X.append(reconstruction.shots[shot_id].pose.get_origin())\n Xp.append(pdr_shots_dict[shot_id][0:3])\n R = reconstruction.shots[shot_id].pose.get_rotation_matrix()\n onplane.append(R[0,:])\n onplane.append(R[2,:])\n verticals.append(R[1,:])\n\n X = np.array(X)\n Xp = np.array(Xp)\n\n # Estimate ground plane.\n p = multiview.fit_plane(X - X.mean(axis=0), onplane, verticals)\n Rplane = multiview.plane_horizontalling_rotation(p)\n X = Rplane.dot(X.T).T\n\n # Estimate 2d similarity to align to pdr predictions\n T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False)\n s = np.linalg.det(T[:2, :2]) ** 0.5\n A = np.eye(3)\n A[:2, :2] = T[:2, :2] / s\n A = A.dot(Rplane)\n b = np.array([\n T[0, 2],\n T[1, 2],\n Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment\n ])\n\n # Align points.\n for point in reconstruction.points.values():\n p = s * A.dot(point.coordinates) + b\n point.coordinates = p.tolist()\n\n # Align cameras.\n for shot in reconstruction.shots.values():\n R = shot.pose.get_rotation_matrix()\n t = np.array(shot.pose.translation)\n Rp = R.dot(A.T)\n tp = -Rp.dot(b) + s * t\n try:\n shot.pose.set_rotation_matrix(Rp)\n shot.pose.translation = list(tp)\n except:\n logger.debug(\"unable to transform reconstruction!\")\n\n return reconstruction", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def test_align_points(self):\n mv = [\n [0, 0, 5],\n [5, 0, 5],\n [0, 5, 5]\n ]\n sv = [\n [0, 0, 0],\n [5, 0, 0],\n [0, 5, 0]\n ]\n al = align(self.amp1, self.amp2, mv=mv, sv=sv, method='contPoints').m\n zMax = self.amp1.vert[:, 2].max() - 5\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n self.assertAlmostEqual(al.vert[:, 2].max(), zMax, delta=TestAlign.DELTA)", "def mirrorPair(N,srcdist=89.61e3+1.5e3,primalign=np.zeros(6),\\\n secalign=np.zeros(6),rrays=False,f=None,\\\n plist=[[0],[0],[0]],hlist=[[0],[0],[0]]):\n #Establish subannulus of rays\n rays = sources.subannulus(220.,221.,100./220.,N,zhat=-1.)\n #Transform to node position\n tran.transform(rays,220,0,0,0,0,0)\n #Set up finite source distance\n raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n rays[4] = rays[1]/raydist\n rays[5] = rays[2]/raydist\n rays[6] = 
-sqrt(1.-rays[4]**2-rays[5]**2)\n\n #Place mirror pair\n coords = [tran.tr.identity_matrix()]*4\n tran.transform(rays,-220+conic.primrad(8450.,220.,8400.),0,50.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*primalign,coords=coords)\n tran.transform(rays,-conic.primrad(8450.,220.,8400.),0,-8450.,0,0,0,\\\n coords=coords)\n## surf.wolterprimary(rays,220.,8400.)\n surf.primaryLL(rays,220.,8400.,8500.,8400.,100./220,*plist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8500.,\\\n rays[3]>8400.))\n tran.reflect(rays)\n #Place secondary in primary's reference frame\n tran.transform(rays,conic.secrad(8350.,220.,8400.),0,8350.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*secalign,coords=coords)\n tran.itransform(rays,conic.secrad(8350.,220.,8400.),0,8350.,0,0,0,\\\n coords=coords)\n## surf.woltersecondary(rays,220.,8400.)\n surf.secondaryLL(rays,220.,8400.,1.,8400.,8300.,100./220,*hlist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.,\\\n rays[3]>8300.))\n tran.reflect(rays)\n\n #Go back to nominal node reference frame and down to focus\n rays = tran.applyT(rays,coords,inverse=True)\n\n if f is None:\n f = -surf.focusI(rays)\n print f\n else:\n tran.transform(rays,0,0,-f,0,0,0)\n surf.flat(rays)\n\n if rrays is True:\n return rays\n \n return anal.hpd(rays)/f * 180/np.pi * 60.**2, \\\n airnp.mean(rays[1]), np.mean(rays[2])", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() 
+ centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def align(self):\n ...", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def alignment(gram1, gram2):\n # BUG: this loss function causes abnormal optimization behaviors, see\n # comments in past commits\n\n alignment = frobenius_inner_prod(gram1, gram2) /\\\n m.sqrt(frobenius_inner_prod(gram1, gram1) *\n frobenius_inner_prod(gram2, gram2))\n return alignment", "def align(stroke1, stroke2):\n\n x1 = np.array(stroke1.x)\n x2 = np.array(stroke2.x)\n y1 = np.array(stroke1.y)\n y2 = np.array(stroke2.y)\n\n d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n m = d - np.min(d)\n\n Ix1 = np.argmax(x1)\n Ix2 = np.argmax(x2)\n Iy1 = np.argmax(y1)\n Iy2 = np.argmax(y2)\n\n ix1 = np.argmin(x1)\n ix2 = np.argmin(x2)\n iy1 = np.argmin(y1)\n iy2 = np.argmin(y2)\n\n # rephasing :\n u = np.array([(Ix1 - Ix2), (Iy1 - Iy2), (ix1 - ix2), (iy1 - iy2)])\n indice_period = np.argmin(np.abs(u))\n period = u[indice_period]\n new_x1 = np.array(x1[period:].tolist() + x1[0:period].tolist())\n new_y1 = np.array(y1[period:].tolist() + y1[0:period].tolist())\n x1 = new_x1\n y1 = new_y1\n\n # resorting : if symetric part, revert it\n mx = np.max((x1, x2), 0)\n my = np.max((y1, y2), 0)\n sym_score = abs(x1 - x2[::-1]) + abs(y1 - y2[::-1])\n if len(x1[sym_score < 50]) > 20:\n x1[sym_score < 40] = x1[sym_score < 40][::-1]\n y1[sym_score < 40] = y1[sym_score < 40][::-1]\n\n new_d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n new_m = new_d - min(new_d)\n\n return x1, y1, d, new_d, m, new_m", "def align(img, left_eye, right_eye):\n left_eye_x, left_eye_y = left_eye\n right_eye_x, right_eye_y = right_eye\n point_3rd, direction = (left_eye, -1) if left_eye_y > right_eye_y else (right_eye, 1)\n\n # np.linalg.norm is being used for euclidean distance\n a = np.linalg.norm(np.array(left_eye) - np.array(point_3rd))\n b = np.linalg.norm(np.array(right_eye) - np.array(point_3rd))\n c = np.linalg.norm(np.array(right_eye) - np.array(left_eye))\n\n if b != 0 and c != 0:\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n angle = (angle * 180) / math.pi\n if direction == -1:\n angle = 90 - angle\n img = Image.fromarray(img)\n img = np.array(img.rotate(direction * angle))\n\n return img", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", 
\"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 
5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def align_reconstruction_no_numpy(reconstruction, anchor_points_dict):\n modified_shots_dict = {}\n all_anchor_shot_ids = sorted(anchor_points_dict.keys())\n for i in range(len(all_anchor_shot_ids) - 1):\n anchor_coords = []\n recon_coords = []\n\n for j in range(2):\n shot_id = all_anchor_shot_ids[i+j]\n anchor_coords.append(anchor_points_dict[shot_id])\n o = get_origin_no_numpy_opencv(reconstruction.shots[shot_id].pose.rotation,\n reconstruction.shots[shot_id].pose.translation)\n\n recon_coords.append(o)\n\n s, A, b = get_affine_transform_2d_no_numpy(anchor_coords, recon_coords)\n\n start_shot_id = all_anchor_shot_ids[i]\n end_shot_id = all_anchor_shot_ids[i+1]\n\n # in first iteration, we transform from first shot of recon\n # in last iteration, we transform until last shot of recon\n shot_ids = sorted(reconstruction.shots.keys())\n if i == 0:\n start_shot_id = shot_ids[0]\n\n if i == len(anchor_points_dict)-2:\n end_shot_id = shot_ids[-1]\n\n new_dict = {}\n\n start_index = _shot_id_to_int(start_shot_id)\n end_index = _shot_id_to_int(end_shot_id)\n\n # transform pdr shots\n for i in range(start_index, end_index + 1):\n shot_id = _int_to_shot_id(i)\n\n if shot_id in reconstruction.shots:\n X = get_origin_no_numpy_opencv(reconstruction.shots[shot_id].pose.rotation,\n reconstruction.shots[shot_id].pose.translation)\n A_dot_X = [A[0][0] * X[0] + A[0][1] * X[1] + A[0][2] * X[2],\n A[1][0] * X[0] + A[1][1] * X[1] + A[1][2] * X[2],\n A[2][0] * X[0] + A[2][1] * X[1] + A[2][2] * X[2]]\n Xp = [i * s + j for i, j in zip(A_dot_X, b)]\n new_dict[shot_id] = [Xp[0], Xp[1], Xp[2]]\n\n modified_shots_dict.update(new_dict)\n\n return modified_shots_dict", "def align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp):\n\n\tif prefilt : ref=ref.process(\"filter.matchto\",{\"to\":ptcl})\n\n\t# initial alignment\n\tif align!=None :\n\t\tali=ptcl.align(align[0],ref,align[1],aligncmp[0],aligncmp[1])\n\n\t# refine alignment if requested\n\tif ralign!=None:\n\t\tralign[1][\"xform.align2d\"] = ali.get_attr(\"xform.align2d\")\n\t\tali=ptcl.align(ralign[0],ref,ralign[1],raligncmp[0],raligncmp[1])\n\n\treturn ali" ]
[ "0.6219051", "0.6053582", "0.5752545", "0.57426506", "0.5725641", "0.5692372", "0.5691541", "0.5664221", "0.56270707", "0.5618279", "0.5603235", "0.5593693", "0.5545809", "0.545052", "0.5446854", "0.5441767", "0.54299045", "0.54238695", "0.54219955", "0.5421373", "0.5344707", "0.5332255", "0.531676", "0.5315854", "0.5310657", "0.5307549", "0.5307014", "0.5289081", "0.5280396", "0.5273125" ]
0.6524807
0
Convert a text file that is composed of columns of numbers into spider doc file
def create_spider_doc(fname,spiderdoc): from string import atoi,atof infile = open(fname,"r") lines = infile.readlines() infile.close() nmc = len(lines[0].split()) table=[] for line in lines: data = line.split() for i in xrange(0,nmc): data[i] = atof(data[i]) table.append(data) drop_spider_doc(spiderdoc ,table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_docs(filename):\n \n # open word doc\n word = win32.gencache.EnsureDispatch('Word.Application')\n doc = word.Documents.Open(os.getcwd() + '/' + filename + \".doc\")\n doc.Activate()\n \n # read word doc as list of lists\n data = [doc.Tables(i).Range.Text for i in range(1,5)]\n data = ''.join(data)\n data = data.replace('\\r\\x07\\r\\x07', ', ')\n data = data.replace('\\r\\x07', ', ')\n data = data.split(\", \")\n \n # separate columns into lists\n varname = data[0::4]\n description = data[1::4]\n valuelineref = data[2::4]\n type = data[3::4]\n\n # create pandas dataframe and clean up\n df = pd.DataFrame(list(zip(varname, description, valuelineref, type)))\n doc.Close(True) # is this a function?\n headers = df.iloc[0]\n df = df[1:]\n df.columns = headers\n df['Variable Name'] = df['Variable Name'].str.replace('\\r','')\n \n # store as csv\n df.to_csv(filename + '.csv', index = False)\n return df", "def doc_to_df(self, doc_no):\n doc_txt = pd.DataFrame()\n i = 1\n with open ('{doc_id}.txt'.format(doc_id = doc_no)) as file:\n for line in file:\n words = pd.Series(line.split(' '))\n doc_txt = doc_txt.append(words, ignore_index=True)\n return doc_txt", "def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))", "def parseManual(filename):\n r = parse(filename)\n res = {}\n for qid in r:\n res[qid] = [int(y) for y in r[qid].split(\",\")]\n return res", "def preformat_coldesc(txt):\r\n\r\n # Converting unnumbered lists directly to DocBook:\r\n #\r\n # The list:\r\n #\r\n # - one\r\n # - two\r\n # - three\r\n #\r\n # Is converted to:\r\n # The list:\r\n # +++<itemizedlist>\r\n # <listitem><simpara> one </simpara></listitem>\r\n # <listitem><simpara> two </simpara></listitem>\r\n # <listitem><simpara> three </simpara></listitem>\r\n # </itemizedlist>\r\n #\r\n # 1. The list must be preceded with a text line, \r\n # followed by two blank lines.\r\n # 2. Each list item must start with \"minus\" (-) without indention.\r\n # Line breaks inside list items are not allowed.\r\n # 3. 
Two or more list items must exist.\r\n\r\n if not txt: txt=\"\"\r\n g = re.compile(\"(\\n\\s*)((\\n- [^\\n]+){2,})\")\r\n txt = g.sub(r\"\\1 +++<itemizedlist> \\2 </itemizedlist>+++\", txt)\r\n\r\n g = re.compile(r\"(\\+\\+\\+<itemizedlist>.*\\n)- ([^\\n]+)(.*</itemizedlist>\\+\\+\\+)\", re.DOTALL)\r\n while(g.search(txt)):\r\n txt = g.sub(r\"\\1 <listitem><simpara>+++ \\2 +++</simpara></listitem> \\3\", txt)\r\n\r\n return txt", "def parse_page(lines, results):\n weights = None\n column = []\n for line in lines:\n if Utils.is_only_newline(line): # No content in this line, it must separate two columns.\n if column and is_weight_column(column[0]):\n weights = parse_weight_column(column[1:])\n if column and not is_weight_column(column[0]):\n parse_data_rate_column(column, weights, results)\n column = []\n else:\n column.append(line)\n else:\n parse_data_rate_column(column, weights, results)", "def drop_spider_doc(filename, data, comment = None):\n\toutf = open(filename, \"w\")\n\tfrom datetime import datetime\n\toutf.write(\" ; %s %s %s\\n\" % (datetime.now().ctime(), filename, comment))\n\tcount = 1 # start key from 1; otherwise, it is confusing...\n\tfor dat in data:\n\t\ttry:\n\t\t\tnvals = len(dat)\n\t\t\tif nvals <= 5: datstrings = [\"%5d %d\" % (count, nvals)]\n\t\t\telse : datstrings = [\"%6d %d\" % (count, nvals)]\n\t\t\tfor num in dat:\n\t\t\t\tdatstrings.append(\"%12.5g\" % (num))\n\t\texcept TypeError:\n\t\t\t# dat is a single number\n\t\t\tdatstrings = [\"%5d 1%12.5g\" % (count, dat)]\n\t\tdatstrings.append(\"\\n\")\n\t\toutf.write(\"\".join(datstrings))\n\t\tcount += 1\n\toutf.close()", "def parse_file(file):\n for line in open(file,'r'):\n line = line.strip()\n token = line.split('\\t')\n ### loop through ids in second column and print with first columns \n for item in token[1].split(','):\n print item+'\\t'+token[0]", "def read_text_file(file_name, ncol = 0):\n\t\n\tfrom string import split\n\tinf = file(file_name, \"r\")\n\tline = inf.readline()\n\tdata = []\n\twhile len(line) > 0:\n\t\tif ncol == -1:\n\t\t\tvdata = split(line)\n\t\t\tif data == []:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata.append([float(vdata[i])])\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata[i].append(float(vdata[i]))\t\t\t\n\t\telse:\n\t\t\tvdata = float(split(line)[ncol])\n\t\t\tdata.append(vdata)\n\t\tline = inf.readline()\n\treturn data", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n X = loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n return fast_sparse_matrix(X)", "def parse_2016(year, file):\n with open(file) as file:\n content = file.read()\n # Place, Name, Age, Sex/plc, Sex, Time, Pace, City, State, Bib No\n cols = [\n 'place', 'first_name', 'last_name', 'age', 'sexpl', 'sex',\n 'time', 'pace', 'city', 'state', 'bib'\n ]\n parser = TDParser(columns=cols)\n parser.feed(content)\n return parser.results", "def format_data(file):\r\n \r\n \r\n data = pd.read_csv(file)\r\n data.index = list(data.iloc[:,0])\r\n data = data.iloc[:,1:]\r\n \r\n return data", "def make_corpus(txtfile, word_int, as_strings=False):\n corp = []\n li = load_help(txtfile)\n \n for i in xrange(len(li)):\n if li[i] == 'Value':\n doc = li[i+1]\n doc = doc.strip()\n doc = doc.strip('[')\n doc = doc.strip(']')\n doc = doc.split(', ')\n doc = [str(w) for w in doc]\n \n idoc = []\n for w in doc:\n try:\n i = word_int[w]\n if as_strings:\n idoc.append(w)\n else:\n idoc.append(int(i))\n except:\n pass\n \n 
corp.append(np.array(idoc))\n \n return corp", "def text_to_columns(text):\n parags = text.split(\"\\n\\n\")\n blocks = []\n for p in parags:\n block = splitter(p)\n blocks.append(block)\n output = \"\"\n for linechunks in zip_longest(*blocks, fillvalue=\"\"):\n line = \"\"\n for lc in linechunks[:-1]:\n line += lc + (COL_WIDTH + COL_SPACE - len(lc)) * \" \"\n line += linechunks[-1]\n output += line + \"\\n\"\n return output", "def read_data(input_file):\n\n def process_line(labels, words):\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append((l, w))\n words = []\n labels = []\n return words, labels, lines\n\n rf = open(input_file, 'r')\n lines = [];\n words = [];\n labels = []\n for line in rf:\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n # here we dont do \"DOCSTART\" check\n\n if len(line.strip()) == 0: # and words[-1] == '.'\n words, labels, lines = process_line(labels, words)\n words.append(word)\n labels.append(label)\n rf.close()\n return lines", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def parse_data(filename):\r\n labels = []\r\n documents = []\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n values = line.split()\r\n label = values[0]\r\n document = []\r\n for wordCount in values[1:]:\r\n parsed = wordCount.split(':')\r\n word = parsed[0]\r\n count = int(parsed[1])\r\n document.append((word, count))\r\n labels.append(label)\r\n documents.append(document)\r\n return (labels, documents)", "def read(self):\n dataset = Dataset()\n\n file_list = glob.glob(str(self.directory + \"/*.txt\"))\n\n for file_path in file_list:\n file_name = os.path.basename(file_path)\n\n docid, partid_prefix, = file_name.replace('.txt', '').split('-', 1)\n # partid_prefix not complete due to multiple part cration for a single .txt file\n\n if 'Abstract' in partid_prefix:\n is_abstract = True\n else:\n is_abstract = False\n\n with open(file_path, encoding='utf-8') as file:\n text_raw = file.read()\n\n text = text_raw.replace('** IGNORE LINE **\\n', '')\n paragraph_list = text.split('\\n\\n')\n\n # inital offset for raw_text\n tot_offset = text_raw.count('** IGNORE LINE **\\n') * 18\n offsets = [tot_offset]\n\n for i, text_part in enumerate(paragraph_list):\n # if text is empty (usually last text due to splitting of \"\\n\\n\")\n if text_part != \"\":\n partid = \"{}-p{}\".format(partid_prefix, i + 1)\n\n if docid in dataset:\n dataset.documents[docid].parts[partid] = Part(text_part, is_abstract=is_abstract)\n else:\n document = Document()\n document.parts[partid] = Part(text_part, is_abstract=is_abstract)\n dataset.documents[docid] = document\n\n # add offset for next paragraph\n tot_offset += len(text_part) + 2\n offsets.append(tot_offset)\n\n # to delete last element\n del offsets[-1]\n\n # annotations\n with open(file_path.replace('.txt', '.ann'), encoding='utf-8') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row 
in reader:\n if row[0].startswith('T'):\n entity_type, start, end = row[1].split()\n start = int(start)\n end = int(end)\n text = row[2]\n\n partid = None\n part_index = None\n\n for i in range(len(offsets) - 1):\n if offsets[i+1] > start:\n part_index = i\n break\n\n if part_index is None:\n part_index = len(offsets) - 1\n\n partid = \"{}-p{}\".format(partid_prefix, part_index + 1)\n real_start = start - offsets[part_index]\n real_end = end - offsets[part_index]\n calc_ann_text = document.parts[partid].text[real_start : real_end]\n\n if calc_ann_text != text:\n print(\" ERROR\", docid, part_index, partid, start, offsets, real_start, \"\\n\\t\", text, \"\\n\\t\", calc_ann_text, \"\\n\\t\", document.parts[partid].text)\n\n if entity_type == 'mutation':\n ann = Entity(self.mut_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n elif entity_type == 'gene':\n ann = Entity(self.gene_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n return dataset", "def get_formatted_data(line, indices=None):\n\tfile_data = str.strip(line).split(' ')\n\tif indices is None:\n\t\tdata = list(range(len(file_data)))\n\telse:\n\t\tdata = list(indices)\n\t\t\n\tfor i, file_column in enumerate(data):\n\t\tif file_column is not None:\n\t\t\tdatum = file_data[file_column]\n\t\telse:\n\t\t\tdatum = ' '\n\t\tif '.' in datum:\n\t\t\ttry:\n\t\t\t\tdatum = float(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdatum = int(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdata[i] = datum\n\treturn data", "def split_data_corpus(filename):\n\n fid = 1\n with open(filename, 'r') as infile:\n f = open('%s-%s.txt' % (filename.strip('.txt'), fid), 'wb')\n for line, doc in enumerate(infile):\n f.write(doc)\n if not line % 1000 and line > 1:\n f.close()\n fid += 1\n f = open('%s-%s.txt' % (filename.strip('.txt'), fid),\n 'wb')\n f.close()", "def convert_txt_to_data():\n pass", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n d = np.loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n if d.shape[1] < 3:\n raise ValueError('invalid number of columns in input')\n row = d[:,0]-index_offset\n col = d[:,1]-index_offset\n data = d[:,2]\n shape = (max(row)+1,max(col)+1)\n return csr_matrix((data,(row,col)),shape=shape)", "def parse_from_text_file(path):\n with open(path, 'r') as f:\n lines = f.read().split('\\n')\n res = []\n for l in lines:\n print(l)\n if len(l) < 1 or not l[0] in 'X#' :\n continue\n l = l.replace('#', str(BLOCK.WALL))\n l = l.replace('X', str(BLOCK.WALL))\n l = l.replace(' ', str(BLOCK.ROAD))\n l = l.replace('@', str(BLOCK.ROBOT))\n l = l.replace('+', str(BLOCK.ROBOT))\n l = l.replace('.', str(BLOCK.GOAL))\n l = l.replace('$', str(BLOCK.DIAM))\n l = l.replace('*', str(BLOCK.DIAM_ON_GOAL))\n l = [ int(x) for x in l]\n res.append(l)\n print(res)\n return res", "def _gen_data(fhs, columns, sep):\n for fh in fhs:\n for line in fh:\n if line[0] == \"#\": continue\n toks = line.split(sep)\n yield toks[columns[0]], int(toks[columns[1]]), float(toks[columns[2]])", "def findDocumentsTwo():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed2.csv', 'r') as readfile,\\\n open('documentsTwo.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided4 = counterTwo / 4\n lines4 = lineTwo / 4\n 
writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided4: ' + str(divided4) + '\\n')\n writefile.write('lines divided by 4: ' + str(lines4) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines4) + '\\n')\n writefile.write('3: ' + str((lines4 * 2)) + '\\n')\n writefile.write('4: ' + str((lines4 * 3)))\n print('divided4: ' + str(divided4))\n print('lines divided by 4: ' + str(lines4))", "def parse_design(self, detailed_design_file):", "def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])", "def read_file(infile,column_num):\n\n \n column_list = []\n\n with open(infile,'r') as f:\n\n fl = f.readlines()\n\n for line in fl:\n \n \n value = int(line.split()[int(column_num)-1])\n column_list.append(value)\n\n\n return column_list", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def load_data(filename):\n\n with open(filename) as f_obj: # Open file to read & assign file object\n for line in f_obj: # Read each line as text\n print(int(line)) # Convert to int & display" ]
[ "0.5493499", "0.54529065", "0.5267102", "0.52564555", "0.5165884", "0.5158104", "0.5139018", "0.5111768", "0.50976145", "0.5082693", "0.50638324", "0.50472414", "0.5038567", "0.5021236", "0.5013941", "0.5003135", "0.49983117", "0.49895382", "0.49807015", "0.49714673", "0.49597633", "0.49453783", "0.49351558", "0.49332955", "0.49314943", "0.4921506", "0.49201792", "0.49073872", "0.49036503", "0.4901937" ]
0.6535076
0
Output the data in slice iz, row ix of an image to standard out.
def dump_row(input, fname, ix=0, iz=0): fout = open(fname, "w") image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() fout.write("# z = %d slice, x = %d row)\n" % (iz, ix)) line = [] for iy in xrange(ny): fout.write("%d\t%12.5g\n" % (iy, image.get_value_at(ix,iy,iz))) fout.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): 
\n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def write_window(img, ds, window):\n new_img = np.array([img[:, :, i] for i in range(img.shape[2])])\n ds.write(new_img, window=window)", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | 
{}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = 
dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def _write_image(self):\r\n # Create an output raster with the correct number of rows and columns.\r\n gtiff_driver = gdal.GetDriverByName('GTiff')\r\n out_ds = gtiff_driver.Create(os.path.join(self.out_folder, self.out_file_name), self.column, self.row, 1)\r\n out_ds.SetProjection(self.in_ds.GetProjection())\r\n\r\n # Convert the offsets to real-world coordinates for the georeferencing info.\r\n # We can't use the coordinates above because they don't correspond to the pixel edges.\r\n subset_ulx, subset_uly = gdal.ApplyGeoTransform(self.in_gt, self.off_ulx, self.off_uly)\r\n out_gt = list(self.in_gt)\r\n out_gt[0] = subset_ulx\r\n out_gt[3] = subset_uly\r\n out_ds.SetGeoTransform(out_gt)\r\n\r\n data = self.read_image()\r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray(data)\r\n\r\n del out_ds", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()" ]
[ "0.71635914", "0.70319664", "0.7031056", "0.6987682", "0.68559015", "0.6734682", "0.6646704", "0.64677995", "0.6236939", "0.60654515", "0.59142405", "0.56288266", "0.5549433", "0.5510461", "0.5490116", "0.5405265", "0.54043865", "0.5386131", "0.5378602", "0.53766", "0.5349916", "0.53345853", "0.53233147", "0.5274436", "0.52617544", "0.5247129", "0.5245189", "0.5230949", "0.5229908", "0.51902175" ]
0.7089794
1
Create a list of Euler angles suitable for projections. method is either 'S' for Saff algorithm or 'P' for Penczek '94 algorithm 'S' assumes phi1> delta ; symmetry if this is set to pointgroup symmetry (cn or dn) or helical symmetry with pointgroup symmetry (scn or sdn), it will yield angles from the asymmetric unit, not the specified range;
def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = "Minus", symmetry='c1'): from math import pi, sqrt, cos, acos, tan, sin from utilities import even_angles_cd from string import lower,split angles = [] symmetryLower = symmetry.lower() symmetry_string = split(symmetry)[0] if (symmetry_string[0] == "c"): if(phi2 == 359.99): angles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi) if(int(symmetry_string[1:]) > 1): if( int(symmetry_string[1:])%2 ==0): qt = 360.0/int(symmetry_string[1:]) else: qt = 180.0/int(symmetry_string[1:]) n = len(angles) for i in xrange(n): t = n-i-1 if(angles[t][1] == 90.0): if(angles[t][0] >= qt): del angles[t] else: angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi) elif(symmetry_string[0] == "d"): if(phi2 == 359.99): angles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi) if (int(symmetry_string[1:])%2 == 0): qt = 360.0/2/int(symmetry_string[1:]) else: qt = 180.0/2/int(symmetry_string[1:]) n = len(angles) for i in xrange(n): t = n-i-1 if(angles[t][1] == 90.0): if(angles[t][0] >= qt): del angles[t] else: angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi) elif(symmetry_string[0] == "s"): #if symetry is "s", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2 # for helical, theta1 cannot be 0.0 if theta1 > 90.0: ERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1) if theta1 == 0.0: theta1 =90.0 theta_number = int((90.0 - theta1)/theta2) #for helical, symmetry = s or scn cn = int(symmetry_string[2:]) for j in xrange(theta_number,-1, -1): if( j == 0): if (symmetry_string[1] =="c"): if cn%2 == 0: k=int(359.99/cn/delta) else: k=int(359.99/2/cn/delta) elif (symmetry_string[1] =="d"): if cn%2 == 0: k=int(359.99/2/cn/delta) else: k=int(359.99/4/cn/delta) else: ERROR("For helical strucutre, we only support scn and sdn symmetry","even_angles",1) else: if (symmetry_string[1] =="c"): k=int(359.99/cn/delta) elif (symmetry_string[1] =="d"): k=int(359.99/2/cn/delta) for i in xrange(k+1): angles.append([i*delta,90.0-j*theta2,90.0]) else : # This is very close to the Saff even_angles routine on the asymmetric unit; # the only parameters used are symmetry and delta # The formulae are given in the Transform Class Paper # The symmetric unit nVec=[]; # x,y,z triples # is defined by three points b,c, v of Fig 2 of the paper # b is (0,0,1) # c is (sin(thetac),0,cos(thetac)) # a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac)) # f is the normalized sum of all 3 # The possible symmetries are in list_syms # The symmetry determines thetac and Omega # The spherical area is Omega - pi/3; # should be equal to 4 *pi/(3*# Faces) # # symmetry ='tet'; delta = 6; scrunch = 0.9 # closeness factor to eliminate oversampling corners #nVec=[] # x,y,z triples piOver = pi/180.0 Count=0 # used to count the number of angles if (symmetryLower[0:3] =="tet"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps elif (symmetryLower[0:3] =="oct"): m=4.0; fudge=0.8 elif (symmetryLower[0:3] =="ico"): m=5.0; fudge=0.95 else: ERROR("allowable symmetries are cn, dn, tet, oct, icos","even_angles",1) n=3.0 OmegaR = 2.0*pi/m; cosOmega= cos(OmegaR) Edges = 2.0*m*n/(2.0*(m+n)-m*n) Faces = 2*Edges/n Area = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega costhetac = cosOmega/(1-cosOmega) deltaRad= delta*pi/180 NumPoints = int(Area/(deltaRad*deltaRad)) fheight = 1/sqrt(3)/ 
(tan(OmegaR/2.0)) z0 = costhetac # initialize loop z = z0 phi = 0 Deltaz = (1-costhetac)/(NumPoints-1) #[1, phi,180.0*acos(z)/pi,0.] anglesLast = [phi,180.0*acos(z)/pi,0.] angles.append(anglesLast) nLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z] nVec = [] nVec.append(nLast) Count +=1 for k in xrange(1,(NumPoints-1)): z=z0 + Deltaz*k # Is it higher than fhat or lower r= sqrt(1-z*z) if (z > fheight): phiRmax= OmegaR/2.0 if (z<= fheight): thetaR = acos(z); cosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega); phiMax = 180.0*( OmegaR - acos(cosStuff))/pi angleJump = fudge* delta/r phi = (phi + angleJump)%(phiMax) anglesNew = [phi,180.0*acos(z)/pi,0.]; nNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z] diffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)] diffMin = min(diffangleVec) if (diffMin>angleJump*piOver *scrunch): Count +=1 angles.append(anglesNew) nVec.append(nNew) #[Count, phi,180*acos(z)/pi,0.] anglesLast = anglesNew nLast=nNew angles.append( [0.0, 0.0, 0.0] ) nLast= [ 0., 0. , 1.] nVec.append(nLast) if(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] ) angles.reverse() if(phiEqpsi == "Minus"): for i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0 #print(Count,NumPoints) # look at the distribution # Count =len(angles); piOver= pi/180.0; # phiVec = [ angles[k][0] for k in range(Count)] ; # thetaVec = [ angles[k][1] for k in range(Count)] ; # xVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ] # yVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ] # zVec = [cos(piOver * angles[k][1]) for k in range(Count) ] # pylab.plot(yVec,zVec,'.'); pylab.show() return angles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):\n\tfrom math import pi, sqrt, cos, acos\n\tangles = []\n\tif (method == 'P'):\n\t\ttemp = Util.even_angles(delta, theta1, theta2, phi1, phi2)\n\t\t#\t\t phi, theta, psi\n\t\tfor i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);\n\telse: #elif (method == 'S'):\n\t\tDeltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)\n\t\ts = delta*pi/180.0\n\t\tNFactor = 3.6/s\n\t\twedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)\n\t\tNumPoints = int(NFactor*NFactor*wedgeFactor)\n\t\tangles.append([phi1, theta1, 0.0])\n\t\tz1 = cos(theta1*pi/180.0); \tphi=phi1 # initialize loop\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z1 + Deltaz*k/(NumPoints-1)\n\t\t\tr= sqrt(1-z*z)\n\t\t\tphi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1))\n\t\t\t#[k, phi,180*acos(z)/pi, 0]\n\t\t\tangles.append([phi, 180*acos(z)/pi, 0.0])\n\t\t#angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07\n\tif (phiEQpsi == 'Minus'):\n\t\tfor k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0\n\tif( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )\n\n\treturn angles", "def angles(self, num: int) -> Iterable[float]:\n if num < 2:\n raise ValueError(\"num >= 2\")\n start = self.dxf.start_angle % 360\n stop = self.dxf.end_angle % 360\n if stop <= start:\n stop += 360\n for angle in linspace(start, stop, num=num, endpoint=True):\n yield angle % 360", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. 
+ output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def getAngles(self):\n try:\n return self._angleList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"ANGLE_FORCE_CONSTANT\"]\n angleEquil=self._raw_data[\"ANGLE_EQUIL_VALUE\"]\n anglePointers = self._raw_data[\"ANGLES_INC_HYDROGEN\"] \\\n +self._raw_data[\"ANGLES_WITHOUT_HYDROGEN\"]\n self._angleList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole/(units.radian*units.radian)).conversion_factor_to(units.kilojoule_per_mole/(units.radian*units.radian))\n for ii in range(0,len(anglePointers),4):\n if int(anglePointers[ii])<0 or \\\n int(anglePointers[ii+1])<0 or \\\n int(anglePointers[ii+2])<0:\n raise Exception(\"Found negative angle atom pointers %s\"\n % ((anglePointers[ii],\n anglePointers[ii+1],\n anglePointers[ii+2]),))\n iType=int(anglePointers[ii+3])-1\n self._angleList.append((int(anglePointers[ii])//3,\n int(anglePointers[ii+1])//3,\n int(anglePointers[ii+2])//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(angleEquil[iType])))\n return self._angleList", "def angle_calc(sides):\n return 360//sides", "def get_angles(sides):\n return [get_angle(sides[1], sides[2], sides[0]),\n get_angle(sides[2], sides[0], sides[1]),\n get_angle(sides[0], sides[1], sides[2])]", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def angles(self) -> list[npt.NDArray[np.float_]]:\n result = []\n a = cast(Segment, self.edges[-1])\n for b in self.edges:\n b = cast(Segment, b)\n result.append(angle(a.vertices[1], a.vertices[0], b.vertices[1]))\n a = b\n\n return result", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 
+ self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]", "def steps_to_angle():\n pass", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def get_euler_angles_from_T(T):\n pass", "def get_internal_angles(self):\n\n angles = []\n\n for elx, elz in zip(self.grid['x'], self.grid['z']):\n el_angles = []\n xy = np.vstack((elx, elz))\n for i in range(0, elx.size):\n i1 = (i - 1) % elx.size\n i2 = (i + 1) % elx.size\n\n a = (xy[:, i] - xy[:, i1])\n b = (xy[:, i2] - xy[:, i])\n # note that nodes are ordered counter-clockwise!\n angle = np.pi - np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1]\n )\n el_angles.append(angle * 180 / np.pi)\n angles.append(el_angles)\n return np.array(angles)", "def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)", "def angles(self):\n self._sort_measurements()\n return self._angles", "def archimedean(\n radius_start,\n radius_end,\n step,\n center=None,\n close=False,\n point_start=None,\n angle_start=None,\n arc_res=None):\n\n if radius_start > radius_end:\n sign = 1\n else:\n sign = -1\n\n # the spiral constant\n # evaluated from: step = K * 2 * pi\n K = step / (np.pi * 2)\n\n # use our constant to find angular start and end\n theta_start = radius_start / K\n theta_end = radius_end / K\n\n # if not passed set angular resolution\n if arc_res is None:\n arc_res = constants.default_arc\n\n arc_count = int(np.ceil((\n abs(theta_end - theta_start)) / arc_res))\n\n # given that arcs will share points how many\n # points on the helix do we need\n arc_index, point_count = arc_indexes(arc_count)\n\n assert arc_index.max() == point_count - 1\n\n # create an array of angles\n theta = np.linspace(theta_start, theta_end, point_count)\n\n # use the spiral equation to generate radii\n radii = theta * K\n\n # make sure they match\n assert np.isclose(radii[0], radius_start)\n assert np.isclose(radii[-1], radius_end)\n\n # do offset AFTER radius calculation\n if angle_start is not None:\n theta += (angle_start - theta_start)\n\n # convert polar coordinates to 2D cartesian\n points = np.column_stack(\n (np.cos(theta), np.sin(theta))) * radii.reshape((-1, 1))\n\n if close:\n\n # get indexes of arcs required to close\n close_idx, close_ct = arc_indexes(\n int(np.ceil((np.pi * 2) / arc_res)))\n\n # the additional angles needed to close\n # we are cutting off the first point as its a duplicate\n t_close = np.linspace(theta[-1],\n theta[-1] + np.pi * 2 * sign,\n close_ct)[1:]\n\n # additional points to close the arc\n closer = np.column_stack((\n np.cos(t_close), np.sin(t_close))) * radii[-1]\n assert len(closer) == close_ct - 1\n assert len(points) == point_count\n\n # stack points with closing arc\n points = np.vstack((points, closer))\n # add the additional points to the count\n point_count += close_ct - 1\n # add the additional arc indexes\n\n arc_index = np.vstack((\n arc_index, arc_index[-1][-1] + close_idx))\n\n assert len(points) == point_count\n # max index of arcs should correspond to points\n assert arc_index[-1][-1] == point_count - 1\n\n if center is not None:\n points += center\n\n # convert sequential points into three point arcs\n arcs = points[arc_index]\n\n if constants.strict:\n # 
check all arcs to make sure the correspond\n for a, b in zip(arcs[:-1], arcs[1:]):\n assert np.allclose(a[2], b[0])\n\n if point_start is not None:\n a, b = np.clip(\n (point_start[:2] - center[:2]) / radius_start,\n -1.0, 1.0)\n assert np.isclose(a, np.cos(angle_start), atol=1e-3)\n assert np.isclose(b, np.sin(angle_start), atol=1e-3)\n\n return arcs", "def angles(self):\n penult = self._coordinates[-2]\n last = self._coordinates[-1]\n angles = []\n for c in self._coordinates:\n angle = (math.atan2(penult[0]-last[0], penult[1]-last[1]) -\n math.atan2(c[0]-last[0], c[1]-last[1]))\n angles.append(angle)\n penult, last = last, c\n return sorted(angles)", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle\n theta = -np.sin(self.A[0, 2]) # Pitch Angle\n psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle\n return np.array([phi, theta, psi])", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def __generate_LSP_angles__(self):\n self.LSP_ANGLES = np.linspace(0, self._range_lsp_angle, ArrayInfo.len_lsp) - (self._range_lsp_angle / 2)\n self.LSP_MIN_ANGLE = np.min(self.LSP_ANGLES) - 0.5 # Angles outside of this range are discarded\n self.LSP_MAX_ANGLE = np.max(self.LSP_ANGLES) + 0.5 # Angles outside of this range are discarded", "def angle(self, dates, values, angle_type):\n \n print(\"Angels running...\")\n exa_days = []\n exa_idx, extms = self.extrema(values, angle_type)\n for i in range(len(exa_idx)):\n exa_days.append(dates[exa_idx[i]])\n def_dates, def_point, k = self.calAng(exa_days, extms, angle_type)\n print(\"Angles done!\")\n return def_dates, def_point, k", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def euler2Q(self, (phi, theta, psi)):\n\thalf_phi = 0.5*phi;\n\thalf_theta = 0.5*theta;\n\thalf_psi = 0.5*psi;\n\n return np.asarray([\n (cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),\n (sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),\n (cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),\n (cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))\n ]);", "def standarize_euler(euler: np.ndarray, in_radian=True) -> np.ndarray:\n if not in_radian:\n euler = np.radians(euler)\n return np.where(\n euler<0, \n (euler+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),\n euler%(2*np.pi)\n )", "def projection_angles(name):\n if name == 'xy':\n return 0, 0, 0\n elif name == 'xz':\n return -np.pi/2, 0, 0\n elif name == 'yz':\n return -np.pi/2, 0, -np.pi/2\n elif name == 'yx':\n return 0, np.pi, np.pi/2\n elif name == 'zx':\n return np.pi/2, np.pi/2, 0\n elif name == 'zy':\n return np.pi, np.pi/2, np.pi\n else:\n raise ValueError('Invalid projection name: {!r}.'.format(name))", "def number_to_angle(number: int, number_sectors: int) -> [np.float, np.float, np.float]:\n angles_phi = np.linspace(-22.5, 22.5, number_sectors)\n angles_theta = np.linspace(67.5, 112.5, number_sectors)\n theta = number // number_sectors\n phi = 
number % number_sectors\n return asCartesian([1, angles_theta[theta], angles_phi[phi]])", "def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal" ]
[ "0.63690925", "0.59872353", "0.5834576", "0.5605631", "0.5481775", "0.5450778", "0.5426255", "0.5415383", "0.540476", "0.54044795", "0.53929913", "0.5386991", "0.5379755", "0.53729224", "0.5372181", "0.5360525", "0.53372157", "0.5305089", "0.53043467", "0.5274299", "0.52560574", "0.52554476", "0.52454716", "0.5230425", "0.5197291", "0.51921314", "0.51663566", "0.51654804", "0.5160156", "0.5153621" ]
0.616338
1
Create a list of Euler angles suitable for projections. method is either 'S' for Saff algorithm or 'P' for Penczek '94 algorithm 'S' assumes phi1> delta ; phiEQpsi set this to 'Minus', if you want psi=phi;
def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'): from math import pi, sqrt, cos, acos angles = [] if (method == 'P'): temp = Util.even_angles(delta, theta1, theta2, phi1, phi2) # phi, theta, psi for i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]); else: #elif (method == 'S'): Deltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0) s = delta*pi/180.0 NFactor = 3.6/s wedgeFactor = abs(Deltaz*(phi2-phi1)/720.0) NumPoints = int(NFactor*NFactor*wedgeFactor) angles.append([phi1, theta1, 0.0]) z1 = cos(theta1*pi/180.0); phi=phi1 # initialize loop for k in xrange(1,(NumPoints-1)): z=z1 + Deltaz*k/(NumPoints-1) r= sqrt(1-z*z) phi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1)) #[k, phi,180*acos(z)/pi, 0] angles.append([phi, 180*acos(z)/pi, 0.0]) #angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07 if (phiEQpsi == 'Minus'): for k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0 if( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] ) return angles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = \"Minus\", symmetry='c1'):\n\n\tfrom math import pi, sqrt, cos, acos, tan, sin\n\tfrom utilities import even_angles_cd\n\tfrom string import lower,split\n\tangles = []\n\tsymmetryLower = symmetry.lower()\n\tsymmetry_string = split(symmetry)[0]\n\tif (symmetry_string[0] == \"c\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif(int(symmetry_string[1:]) > 1):\n\t\t\t\tif( int(symmetry_string[1:])%2 ==0):\n\t\t\t\t\tqt = 360.0/int(symmetry_string[1:])\n\t\t\t\telse:\n\t\t\t\t\tqt = 180.0/int(symmetry_string[1:])\n\t\t\t\tn = len(angles)\n\t\t\t\tfor i in xrange(n):\n\t\t\t\t\tt = n-i-1\n\t\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"d\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif (int(symmetry_string[1:])%2 == 0):\n\t\t\t\tqt = 360.0/2/int(symmetry_string[1:])\n\t\t\telse:\n\t\t\t\tqt = 180.0/2/int(symmetry_string[1:])\n\t\t\tn = len(angles)\n\t\t\tfor i in xrange(n):\n\t\t\t\tt = n-i-1\n\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"s\"):\n\t\n\t#if symetry is \"s\", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2\n\t\t# for helical, theta1 cannot be 0.0\n\t\tif theta1 > 90.0:\n\t\t\tERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)\n\t\tif theta1 == 0.0: theta1 =90.0\n\t\ttheta_number = int((90.0 - theta1)/theta2)\n\t\t#for helical, symmetry = s or scn\n\t\tcn = int(symmetry_string[2:])\n\t\tfor j in xrange(theta_number,-1, -1):\n\n\t\t\tif( j == 0):\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/4/cn/delta)\n\t\t\t\telse:\n\t\t\t\t\tERROR(\"For helical strucutre, we only support scn and sdn symmetry\",\"even_angles\",1)\n\n\t\t\telse:\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\t\t\n\t\t\tfor i in xrange(k+1):\n\t\t\t\t\tangles.append([i*delta,90.0-j*theta2,90.0])\n\n\n\telse : # This is very close to the Saff even_angles routine on the asymmetric unit;\n\t\t# the only parameters used are symmetry and delta\n\t\t# The formulae are given in the Transform Class Paper\n\t\t# The symmetric unit \t\tnVec=[]; # x,y,z triples\n\t\t# is defined by three points b,c, v of Fig 2 of the paper\n\t\t# b is (0,0,1)\n\t\t# c is (sin(thetac),0,cos(thetac))\n\t\t# a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))\n\t\t# f is the normalized sum of all 3\n\t\t\n\t\t# The possible symmetries are in list_syms\n\t\t# The symmetry determines thetac and Omega\n\t\t# The spherical area is Omega - pi/3; \n\t\t# should be equal to 4 *pi/(3*# Faces)\n\t\t#\t\t\n\t\t# symmetry ='tet'; delta = 6;\n\n\t\tscrunch = 0.9 # closeness factor to eliminate 
oversampling corners\n\t\t#nVec=[] # x,y,z triples\n\n\t\tpiOver = pi/180.0\n\t\tCount=0 # used to count the number of angles\n\t\t\n\t\tif (symmetryLower[0:3] ==\"tet\"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps\n\t\telif (symmetryLower[0:3] ==\"oct\"): m=4.0; fudge=0.8\n\t\telif (symmetryLower[0:3] ==\"ico\"): m=5.0; fudge=0.95\n\t\telse: ERROR(\"allowable symmetries are cn, dn, tet, oct, icos\",\"even_angles\",1)\n\n\t\tn=3.0\n\t\tOmegaR = 2.0*pi/m; cosOmega= cos(OmegaR)\n\t\tEdges = 2.0*m*n/(2.0*(m+n)-m*n)\n\t\tFaces = 2*Edges/n\n\t\tArea = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega\n\t\tcosthetac = cosOmega/(1-cosOmega)\n\t\tdeltaRad= delta*pi/180\n\t\tNumPoints = int(Area/(deltaRad*deltaRad))\n\t\tfheight = 1/sqrt(3)/ (tan(OmegaR/2.0))\n\n\t\tz0 = costhetac # initialize loop\t\n\t\tz = z0\n\t\tphi = 0\n\t\tDeltaz = (1-costhetac)/(NumPoints-1)\n\n\t\t#[1, phi,180.0*acos(z)/pi,0.]\n\t\tanglesLast = [phi,180.0*acos(z)/pi,0.]\n\t\tangles.append(anglesLast)\n\t\tnLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\tnVec = []\n\t\tnVec.append(nLast)\n\n\t\tCount +=1\n\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z0 + Deltaz*k # Is it higher than fhat or lower\n\t\t\tr= sqrt(1-z*z)\n\t\t\tif (z > fheight): phiRmax= OmegaR/2.0\n\t\t\tif (z<= fheight):\n\t\t\t\tthetaR = acos(z); \n\t\t\t\tcosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega);\n\t\t\t\tphiMax = 180.0*( OmegaR - acos(cosStuff))/pi\n\t\t\tangleJump = fudge* delta/r\n\t\t\tphi = (phi + angleJump)%(phiMax)\n\t\t\tanglesNew = [phi,180.0*acos(z)/pi,0.];\n\t\t\tnNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\t\tdiffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)] \n\t\t\tdiffMin = min(diffangleVec)\n\t\t\tif (diffMin>angleJump*piOver *scrunch):\n\t\t\t\tCount +=1\n\t\t\t\tangles.append(anglesNew)\n\t\t\t\tnVec.append(nNew)\n\t\t\t\t#[Count, phi,180*acos(z)/pi,0.]\n\t\t\tanglesLast = anglesNew\n\t\t\tnLast=nNew\n\n\t\tangles.append( [0.0, 0.0, 0.0] )\n\t\tnLast= [ 0., 0. 
, 1.]\n\t\tnVec.append(nLast)\n\t\tif(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] )\n\t\t\n\t\tangles.reverse()\n\t\tif(phiEqpsi == \"Minus\"):\n\t\t\tfor i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0\n\t\t#print(Count,NumPoints)\n\t\t\n#\t\tlook at the distribution\n#\t\tCount =len(angles); piOver= pi/180.0;\n#\t\tphiVec = [ angles[k][0] for k in range(Count)] ;\n#\t\tthetaVec = [ angles[k][1] for k in range(Count)] ;\n#\t\txVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tyVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tzVec = [cos(piOver * angles[k][1]) for k in range(Count) ]\n#\t\tpylab.plot(yVec,zVec,'.'); pylab.show()\n\n\n\treturn angles", "def euler2Q(self, (phi, theta, psi)):\n\thalf_phi = 0.5*phi;\n\thalf_theta = 0.5*theta;\n\thalf_psi = 0.5*psi;\n\n return np.asarray([\n (cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),\n (sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),\n (cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),\n (cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))\n ]);", "def Euler2Rotation(phi, theta, psi):\n # only call sin and cos once for each angle to speed up rendering\n c_phi = np.cos(phi)\n s_phi = np.sin(phi)\n c_theta = np.cos(theta)\n s_theta = np.sin(theta)\n c_psi = np.cos(psi)\n s_psi = np.sin(psi)\n\n R_roll = np.array([[1, 0, 0],\n [0, c_phi, s_phi],\n [0, -s_phi, c_phi]])\n R_pitch = np.array([[c_theta, 0, -s_theta],\n [0, 1, 0],\n [s_theta, 0, c_theta]])\n R_yaw = np.array([[c_psi, s_psi, 0],\n [-s_psi, c_psi, 0],\n [0, 0, 1]])\n\n R = R_roll @ R_pitch @ R_yaw # inertial to body (Equation 2.4 in book)\n return R.T # transpose to return body to inertial", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. 
+ output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 130 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57 and not np.abs(omega - np.deg2rad(self.omega)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #Omega is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent phi\n return [phi]", "def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)", "def calculate_angles_to_rotate_vector(self, starting_vec, ending_vec, starting_angles=None, search_method=0):\n #Find the starting rotation matrix\n if not starting_angles is None:\n (phi, chi, omega) = starting_angles[0:3]\n starting_rot_matrix = numpy_utils.rotation_matrix(phi, chi, omega)\n #Rotate the starting vector\n starting_vec = np.dot(starting_rot_matrix, column(starting_vec)).flatten()\n\n #Find the rotation matrix that satisfies ending_vec = R . 
starting_vec\n\n #The cross product of q0 X q_over_a gives a rotation axis to use\n rotation_axis = np.cross(starting_vec, ending_vec)\n\n #Now we find the rotation angle about that axis that puts q0 on q_over_a\n angle = np.arccos( np.dot(starting_vec, ending_vec) / (vector_length(starting_vec)*vector_length(ending_vec)))\n\n #Make the rot. matrix\n R = numpy_utils.rotation_matrix_around_vector(rotation_axis, angle)\n\n if not starting_angles is None:\n #The final rotation we want is starting_rot_matrix 1st; R second.\n # So this is the resulting matrix\n R = np.dot(R, starting_rot_matrix)\n\n #The function finds some angles that work\n angles = numpy_utils.angles_from_rotation_matrix(R)\n\n #Position is always allowed\n return (angles)", "def refine_angles(self, method='nelder', **opts):\n self.set_idx()\n from lmfit import fit_report, minimize\n p0 = self.define_parameters(**opts)\n self.result = minimize(self.angle_residuals, p0, method=method)\n self.fit_report = fit_report(self.result)\n if self.result.success:\n self.get_parameters(self.result.params)", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def angle_calc(sides):\n return 360//sides", "def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]", "def phi_list ( self ) :\n return self.__phi_list", "def steps_to_angle():\n pass", "def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n fixed_omega = False\n phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)\n assert len(phi_psi_atoms) == len(angles), \"%d != %d\" % (len(phi_psi_atoms), len(angles))\n if not direction_forward:\n phi_psi_atoms.reverse()\n angles.reverse()\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n # print \"phi_psi_pair\", phi_psi_pair\n omega = ps_atoms[2]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # print \"ps_atoms, 
target_angle_pair\", phi_psi_angles, target_angle_pair\n # phi\n if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:\n rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # psi\n if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:\n rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # omega\n if omega is not None and abs(abs(omega)-180) > 10 and check_omega:\n rotation_angle= -omega+180\n # print \"Omega rotation:\", omega, rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][0],\n phi_psi_pair[0][1],\n angle=rotation_angle,\n direction_forward=direction_forward)\n fixed_omega = True\n # print utils.list_rama_outliers_h(result_h)\n # result_h.write_pdb_file(file_name=\"variant_%s.pdb\" % direction_forward)\n # STOP()\n return result_h, fixed_omega", "def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def equations(p):\n [x, y] = p\n list = [x - 5 , y - 5]\n return list", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(omega - np.deg2rad(self.omega)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, chi]", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! 
Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def solver(self, alpha):\n if alpha == 0: # skip divided by 0 error\n return [0], [0] # r and phi=0\n\n if alpha == 180:\n return [self.D], [0] # if angle= pi then, tan(pi)=0 so 1/tan=1/0\n\n # initial value for position and angular speed\n y0 = [1/self.D, 1/(self.D*math.tan(math.radians(alpha)))]\n sol = solve_ivp(fun=self._diff_eq, t_span=[0, 10*pi], y0=y0, method='Radau', events=[self._eventRs]) #, self._eventR])#,t_eval=np.linspace(0, t_max, 10000)) #dense_output=False\n\n if sol.t[-1] == 10*pi:\n raise StopIteration(\"solver error, alpha reached computation limit (loop number)\")\n\n phi = np.array(sol.t)\n r = np.abs(1/sol.y[0, :]) # must use this because solver can't be stop before infinity because negative\n\n return r, phi", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw" ]
[ "0.6162403", "0.60886765", "0.5816294", "0.5785667", "0.5628353", "0.5463198", "0.54578865", "0.5440061", "0.5435098", "0.5419207", "0.53673875", "0.53652376", "0.5361059", "0.5337354", "0.5325969", "0.53146446", "0.52769536", "0.52645385", "0.52579445", "0.52271026", "0.52208894", "0.5220837", "0.5214414", "0.5214414", "0.5214414", "0.5214268", "0.5204542", "0.51765436", "0.5169213", "0.5155186" ]
0.6891914
0
Get the in-plane angle from two images and output the cross-correlation value. The function won't destroy the two input images. This is the angle that rotates the first image, ima, into the second image, ref. The sense of the rotation is clockwise. center=1 means the image is first centered, then the rotation angle is found.
def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1): from alignment import Numrinit, ringwe, Applyws, ormq from filter import fshift first_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp) nx=ima.get_xsize() if(last_ring == -1): last_ring=int(nx/2)-2 cnx = int(nx/2)+1 cny = cnx mode = "F" #precalculate rings numr = Numrinit(first_ring, last_ring, rstep, mode) wr = ringwe(numr, mode) if(center==1): cs = [0.0]*2 # additio cs = ref.phase_cog() ref1 = fshift(ref, -cs[0], -cs[1]) cimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode) cs = ima.phase_cog() ima1 = fshift(ima, -cs[0], -cs[1]) else: ima1=ima.copy() cimage=Util.Polar2Dm(ref, cnx, cny, numr, mode) Util.Frngs(cimage, numr) Applyws(cimage, numr, wr) [angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny) return angt,sxst, syst, mirrort, peakt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_opt_rotate(obj_img, back_img,\n back_center_x, back_center_y,\n obj_center_x, obj_center_y,\n prev_rot_angle=0.,\n is_erosion=False):\n width = obj_img.shape[0]\n rot_img = ndimage.rotate(obj_img, prev_rot_angle, reshape=False)\n induce_x, induce_y = int(back_center_x - obj_center_x), int(back_center_y - obj_center_y)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rot_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n angle_amount = 4.\n else:\n angle_amount = 16.\n # check combine_img.dtype; rot_img.dtype; back_img\n curr_angle = prev_rot_angle\n while angle_amount > 0.5:\n angle_amount /= 2.\n\n rotate_1 = ndimage.rotate(obj_img, curr_angle + angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y+width, induce_x:induce_x+width] -= rotate_1\n neg_count_1 = len(np.argwhere(combine_img < 0))\n\n rotate_2 = ndimage.rotate(obj_img, curr_angle - angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rotate_2\n neg_count_2 = len(np.argwhere(combine_img < 0))\n\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_angle = curr_angle + angle_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_angle = curr_angle - angle_amount\n # print(curr_angle)\n # print(neg_count, neg_count_1, neg_count_2)\n # print('Negative Pix Count Rotation: %d.' % neg_count)\n # print('Optimal Rotation: ', curr_angle)\n return curr_angle, neg_count", "def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):\n max_value = np.amax(prev_image)\n\n if prev_image.dtype == 'float' and max_value <= 1:\n prev_image = np.uint8(prev_image * 255)\n curr_image = np.uint8(curr_image * 255)\n\n if prev_image.dtype == 'float' and max_value > 1:\n prev_image = np.uint8(prev_image)\n curr_image = np.uint8(curr_image)\n\n prev_image = cv.equalizeHist(prev_image)\n curr_image = cv.equalizeHist(curr_image)\n\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=200)\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(prev_image, None)\n kp2, des2 = orb.detectAndCompute(curr_image, None)\n\n # do feature matching\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # calculate perspective transform matrix\n src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n vector_along_x_axis_from_center = \\\n np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],\n [size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)\n vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)\n\n theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],\n vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi\n # negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system\n return theta", "def 
calculate_translation(reference_im:np.ndarray, \n target_im:np.ndarray,\n ref_to_tar_rotation:np.ndarray=None,\n use_autocorr:bool=True,\n alignment_kwargs:dict={},\n verbose:bool=True,\n ):\n from math import pi\n import cv2\n ## quality check\n # images\n if np.shape(reference_im) != np.shape(target_im):\n raise IndexError(f\"two images should be of the same shape\")\n # rotation matrix\n if ref_to_tar_rotation is None:\n ref_to_tar_rotation = np.diag([1,1])\n elif np.shape(ref_to_tar_rotation) != tuple([2,2]):\n raise IndexError(f\"wrong shape for rotation matrix, should be 2x2. \")\n # get dimensions\n _dz,_dx,_dy = np.shape(reference_im)\n # calculate angle\n if verbose:\n print(f\"-- start calculating drift with rotation between images\")\n _rotation_angle = np.arcsin(ref_to_tar_rotation[0,1])/pi*180\n _temp_new_rotation_M = cv2.getRotationMatrix2D((_dx/2, _dy/2), _rotation_angle, 1) # temporary rotation angle\n # rotate image\n if _rotation_angle != 0:\n _rot_target_im = np.array([cv2.warpAffine(_lyr, _temp_new_rotation_M, \n _lyr.shape, borderMode=cv2.BORDER_DEFAULT) \n for _lyr in target_im], dtype=reference_im.dtype)\n else:\n _rot_target_im = target_im\n # calculate drift \n _drift, _drift_flag = align_image(\n _rot_target_im,\n reference_im,\n precision_fold=10,\n use_autocorr=use_autocorr,\n verbose=verbose,\n #detailed_verbose=verbose,\n **alignment_kwargs,)\n\n if verbose:\n print(f\"--- drift: {np.round(_drift,2)} pixels\")\n \n return _rot_target_im, ref_to_tar_rotation, _drift", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = 
np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)", "def collisionAngle(obj1, obj2):\n vec1 = obj1.vec\n vec2 = obj2.vec\n n1 = np.linalg.norm(vec1)\n n2 = np.linalg.norm(vec2)\n return abs(np.cross(vec1,vec2)/(n1*n2))", "def ICAngles(image, keypoints, half_patch_size, u_max):\n \n kp_position = cv.KeyPoint_convert(keypoints)\n ptsize = len(kp_position)\n \n for ptidx in range(ptsize):\n \n m_01 = 0\n m_10 = 0\n\n for u in range(-half_patch_size, half_patch_size+1):\n m_10 = m_10 + u * image[int(kp_position[ptidx,1]),int(kp_position[ptidx,0])+u]\n \n for v in range(1,half_patch_size+1):\n v_sum = 0\n d = u_max[v]\n for u in range(-d, d-1):\n val_plus = int(image[int(kp_position[ptidx,1])+v, int(kp_position[ptidx,0])+u])\n val_minus = int(image[int(kp_position[ptidx,1])-v, int(kp_position[ptidx,0])+u])\n v_sum = v_sum + (val_plus - val_minus)\n m_10 = m_10 + u * (val_plus + val_minus)\n m_01 = m_01 + v * v_sum\n \n keypoints[ptidx].angle = math.atan2(float(m_01), float(m_10))", "def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)", "def rot_center(image, rect, angle):\n\trot_image = pygame.transform.rotate(image, angle)\n\trot_rect = rot_image.get_rect(center=rect.center)\n\treturn rot_image,rot_rect", "def matrix_angle( B, A ):\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))", "def rot_center(image,rect,angle):\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))", "def angle_between_vectors(vect_ref, vect):\n\n c = np.dot(vect_ref.T, vect) / (np.linalg.norm(vect_ref) * np.linalg.norm(vect))\n angle = np.arccos(np.clip(c, -1, 1))\n\n return angle", "def pairwise_iou_rotated(boxes1, boxes2):\n return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2)", "def image_correlation(image1, image2):\n im1=im_to_coord(image1)\n im2=im_to_coord(image2)\n z1=im1[:,2]\n z2=im2[:,2]\n mu_z1 = z1.mean()\n mu_z2 = z2.mean()\n n = z1.shape[0]\n s_z1 = z1.std(0, ddof=n - 1)\n s_z2 = z2.std(0, ddof=n - 1)\n cov = np.dot(z1,\n z2.T) - n * np.dot(mu_z1,\n mu_z2)\n return cov / np.dot(s_z1, s_z2)", "def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = 
np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def rot_center(self, image, position, angle):\n rect = image.get_rect().move(*position)\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def img_rotate(img, angle, center, fillval=0):\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D(center, angle, 1)\n return cv2.warpAffine(img, M, (cols, rows), borderValue=fillval)", "def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = 
np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def orientToXYZR( a, b ):\n if allclose(a,b):\n return (0,1,0,0)\n an,bn = normalise( (a,b) )\n angle = arccos(dot(an,bn))\n x,y,z = crossProduct( a, b )[0]\n if allclose( (x,y,z), 0.0):\n y = 1.0\n return (x,y,z,angle)", "def angle_between(vecs, baseline):\n vecs = CA_coords(vecs)\n baseline = CA_coords(baseline)\n return np.arccos(np.clip(vecs @ baseline.T, -1.0, 1.0))", "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def icp_step(Points1,Points2):\r\n #get the correspondences\r\n S1,S2 = get_correspondences(Points1,Points2)\r\n\r\n # Center the resulting pairs substracting their means\r\n S1_shift, mean1 = subtract_mean(S1)\r\n S2_shift, mean2 = subtract_mean(S2)\r\n\r\n #calculate the error-minimizing rotation\r\n R = compute_error_minimizing_rotation(S1_shift,S2_shift)\r\n #find the t such that R*p+t = R*(p-mean2)+mean1\r\n Rmean2 = [R[0][0]*mean2[0]+R[0][1]*mean2[1],\r\n R[1][0]*mean2[0]+R[1][1]*mean2[1]]\r\n\r\n return R,[-(mean1[0]-Rmean2[0]),-(mean1[1]-Rmean2[1])]", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def vrrotvec(a,b):\n a = normalize(a)\n b = normalize(b)\n ax = normalize(np.cross(a,b))\n angle = np.arccos(np.minimum(np.dot(a,b),[1]))\n if not np.any(ax):\n absa = np.abs(a)\n mind = np.argmin(absa)\n c = np.zeros((1,3))\n c[mind] = 0\n ax = normalize(np.cross(a,c))\n r = np.concatenate((ax,angle))\n return r" ]
[ "0.6499765", "0.64251083", "0.629712", "0.61103505", "0.594957", "0.58397025", "0.58203536", "0.57837665", "0.5770846", "0.57563686", "0.5699609", "0.5689523", "0.56460613", "0.56327623", "0.561697", "0.5562077", "0.5553344", "0.5546299", "0.5523779", "0.55019164", "0.54991394", "0.5494375", "0.5482069", "0.5479521", "0.54536027", "0.5439109", "0.5434319", "0.5432098", "0.5386073", "0.53753316" ]
0.69551015
0
Return an image created from a text file. The first line of the file should contain "nx ny nz" (separated by whitespace). All subsequent lines contain "ix iy iz val", where ix, iy, and iz are the integer x, y, and z coordinates of the point and val is the floating-point value of that point. All points not explicitly listed are set to zero.
def get_textimage(fname): from string import atoi,atof infile = open(fname) lines = infile.readlines() infile.close() data = lines[0].split() nx = atoi(data[0]) ny = atoi(data[1]) nz = atoi(data[2]) e = EMData() e.set_size(nx, ny, nz) e.to_zero() for line in lines[1:]: data = line.split() ix = atoi(data[0]) iy = atoi(data[1]) iz = atoi(data[2]) val = atof(data[3]) e[ix,iy,iz] = val return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_image(filename):\n try:\n fi = open(filename,\"r\")\n lines = fi.readlines()\n n = int(lines[0]);\n img = create_zeroed_image(n)\n for i,line in enumerate(lines[1:]):\n clean_line = line.strip() # remove whitespace and newlines\n for j,char in enumerate(clean_line):\n # your code here\n \n img[i][j]=char\n # end your code here\n return img\n except IOError:\n raise Exception(\"Cannot find file \" + filename);\n finally:\n fi.close()", "def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)", "def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z", "def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')", "def inithr(_filename):\n # Open file provided\n _file = open(_filename)\n # Create empty array to hold data\n _data = np.zeros((1, 3), dtype=float)\n\n # Iterate through the file line by line\n for _line in _file:\n # Split each line into constituent values\n _x = _line.split()\n # Append data array with each value, converted to float, convert parallax angle to distance\n _data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)\n\n # Iterate through data array\n for _row in _data:\n np.seterr(divide='ignore')\n # Convert magnitude to luminosity\n _row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)\n # Convert B-V colour to temperature\n _row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))\n\n # Delete first empty row\n _data = np.delete(_data, 0, axis=0)\n\n # Return parsed data\n return _data", "def file_parser(file_name):\n h = 480\n w = 640\n out = []\n with open(file_name, 'r') as f:\n line_num = 1\n for line in f:\n if line_num < 17:\n # Read to where data starts\n line_num += 1\n continue\n elif line_num > 74:\n break\n # print(list(map(int, line.strip().split(\" \"))))\n vals = line.split()\n # print(list(\"\".join(line)))\n # print(line.split())\n assert(float(vals[2]) < 640)\n assert(float(vals[3]) < 480)\n point = [float(vals[2]) * w, float(vals[3]) * h]\n # print(point)\n out.append(point)\n line_num += 1\n\n out.append([0,0])\n out.append([w-1, 0])\n out.append([0, h-1])\n out.append([w-1, h-2])\n return out", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def init_from_obj_file(cls, f, scale=1, density=1):\n lines = [line.strip() for line in f.readlines()]\n vertices = []\n indexes = []\n for line in lines:\n if line.startswith(\"v\"): # vertex\n nums = list(map(float, string_to_list(line[2:])))\n vertices.append(scale * np.array(nums[:3]))\n # x.append(nums[0] * scale)\n # y.append(nums[1] * scale)\n # z.append(nums[2] * scale)\n elif 
line.startswith(\"f\"): # face\n nums = list(map(lambda a: int(a) - 1, string_to_list(line[2:])))\n indexes.append(nums)\n return cls(vertices, indexes, density)", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def add_to_matrix_from_file(filename):\n\tif not os.path.exists(filename):\n\t\treturn []\n\n\tpng_image = Image.open(filename)\n\tpng_image.thumbnail((32, 32), Image.ANTIALIAS)\n\n\tcol = 0\n\timage = []\n\tfor pixel in list(png_image.getdata()):\n\t\t# TODO: something better than taking the red value\n\t\tif png_image.mode == 'L':\n\t\t\tpixelr = pixel\n\t\telif png_image.mode == 'RGB':\n\t\t\tpixelr, pixelg, pixelb = pixel\n\t\telif png_image.mode == 'RGBA':\n\t\t\tpixelr, pixelg, pixelb, pixela = pixel\n\t\timage.append(int(pixelr))\n\t\tcol += 1\n\treturn image", "def create_from_file(cls, file_name: str) -> \"TensorImage\":\n image_data = image_utils.decode_image_from_file(file_name)\n return cls(image_data, is_from_numpy_array=False)", "def format_data(filename,dummy=False):\n data = np.matrix(np.loadtxt(filename))\n Y = data[:,-1]\n X = data[:,0:-1]\n if dummy:\n X = np.concatenate((np.ones((len(X), 1)), X), 1)\n return(X,Y)", "def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. 
This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def build_from_file(path):\n with open(path) as obj:\n raw_file = obj.read()\n file_lines = [line.split(\" \") for line in raw_file.split(\"\\n\")]\n\n vertices = {}\n faces = []\n for number, line in enumerate(file_lines):\n if line[0] == \"v\":\n vertices[number + 1] = tuple(map(float, line[1:]))\n if line[0] == \"f\":\n face = []\n for index in line[1:]:\n face.append(vertices[int(index)])\n face.append(vertices[int(line[1])])\n faces.append(face)\n return Object(points=faces)", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n\n return x, y", "def _parse_txt(path, n_channels):\n f = open(path)\n lines = f.readlines()\n f.close()\n\n geom = np.zeros((0, 2))\n\n for i, line in zip(range(n_channels), lines):\n line = line.replace('\\r', '')\n line = line.replace('\\n', '')\n row = line.split(' ')\n geom = np.vstack((geom, row[:2])).astype('float')\n\n return geom", "def get_preprocessed_image(file_name):\n\n im = np.array(Image.open(file_name)).astype(np.float32)\n assert im.ndim == 3, 'Only RGB images are supported.'\n im = im - _IMAGENET_MEANS\n im = im[:, :, ::-1] # Convert to BGR\n img_h, img_w, img_c = im.shape\n assert img_c == 3, 'Only RGB images are supported.'\n if img_h > 500 or img_w > 500:\n raise ValueError('Please resize your images to be not bigger than 500 x 500.')\n\n pad_h = 500 - img_h\n pad_w = 500 - img_w\n im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode='constant', constant_values=0)\n return np.expand_dims(im.astype(np.float32), 0), img_h, img_w", "def loadtext(infile):\n warr, farr, earr=np.loadtxt(infile, usecols=(0,1,2), unpack=True)\n return create_spectrum(warr, farr, earr)", "def read_from(self, filename):\n\n lon, lat, field, weight = [], [], [], []\n\n if os.path.exists(filename):\n logger.info(\"Reading data from file {0}\".format(filename))\n with open(filename, 'r') as f:\n line = f.readline()\n ncols = len(line.split())\n while ncols >= 3:\n lon.append(float(line.split()[0]))\n lat.append(float(line.split()[1]))\n field.append(float(line.split()[2]))\n if ncols >= 4:\n weight.append(float(line.split()[3]))\n else:\n weight.append(1.)\n line = f.readline()\n ncols = len(line.split())\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n self.field = 
np.array(field)\n self.weight = np.array(weight)\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')", "def read_file ( filename ):\r\n\t# lecture de l'en-tete\r\n\tinfile = open ( filename, \"r\" ) \r\n\tnb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\r\n\r\n\t# creation de la structure de donnees pour sauver les images :\r\n\t# c'est un tableau de listes (1 par classe)\r\n\tdata = np.empty ( 10, dtype=object ) \r\n\tfiller = np.frompyfunc(lambda x: list(), 1, 1)\r\n\tfiller( data, data )\r\n\r\n\t# lecture des images du fichier et tri, classe par classe\r\n\tfor ligne in infile:\r\n\t\tchamps = ligne.split ()\r\n\t\tif len ( champs ) == nb_features + 1:\r\n\t\t\tclasse = int ( champs.pop ( 0 ) )\r\n\t\t\tdata[classe].append ( map ( lambda x: float(x), champs ) ) \r\n\tinfile.close ()\r\n\r\n\t# transformation des list en array\r\n\toutput = np.empty ( 10, dtype=object )\r\n\tfiller2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\r\n\tfiller2 ( data, output )\r\n\r\n\treturn output", "def loadtext(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def _openFlt(self, fname):\n image = np.loadtxt(fname)\n\n if(image !=None):\n M,N=(int(image[0]), int(image[1]))\n image = image[2:image.shape[0]]\n image = image.reshape((M,N))\n else:\n raise IOError, \"Image file can not be opened\"\n\n return image", "def img2vector(filename):\n img_vector = zeros((1, 32*32))\n with open(filename, 'r') as fr:\n for row in xrange(32):\n line = fr.readline()\n for column in xrange(32):\n img_vector[0, 32*row+column] = int(line[column])\n\n return img_vector", "def open_image(infile):\n with fits.open(infile) as f:\n header = f[0].header\n data = f[0].data\n if data.ndim == 2:\n # NAXIS=2: [Y, X]\n image = data\n elif data.ndim == 3 and data.shape[0] == 1:\n # NAXIS=3: [FREQ=1, Y, X]\n image = data[0, :, :]\n elif data.ndim == 4 and data.shape[0] == 1 and data.shape[1] == 
1:\n # NAXIS=4: [STOKES=1, FREQ=1, Y, X]\n image = data[0, 0, :, :]\n else:\n raise ValueError(\"Slice '{0}' has invalid dimensions: {1}\".format(\n infile, data.shape))\n return (header, image)", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def f2image(infile, maskit=True):\n global master_mask\n\n tab = Table.read(infile, format=\"ascii\", names=(\"quadrant\", \"detx\", \"dety\", \"area\"), comment=\"#\")\n pixel_edges = np.arange(-0.5, 63.6)\n image = np.zeros((128,128))\n im_sub = np.zeros((64,64))\n\n data = tab[tab[\"quadrant\"] == 0]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[64:128,0:64] = np.copy(im_sub)\n\n data = tab[tab[\"quadrant\"] == 1]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[64:128,64:128] = np.copy(im_sub)\n\n data = tab[tab[\"quadrant\"] == 2]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[0:64,64:128] = np.copy(im_sub)\n\n data = tab[tab[\"quadrant\"] == 3]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[0:64,0:64] = np.copy(im_sub)\n\n if maskit:\n image[master_mask] = 0\n\n return image" ]
[ "0.6697791", "0.6425697", "0.6267778", "0.60752165", "0.6063262", "0.6061974", "0.5948842", "0.5867118", "0.5839898", "0.5778474", "0.57653004", "0.56806886", "0.5678753", "0.56717163", "0.56614995", "0.5646917", "0.5627825", "0.56230605", "0.561317", "0.56094795", "0.5598929", "0.5584855", "0.55793023", "0.555096", "0.5548226", "0.5520904", "0.55174875", "0.5508754", "0.5507941", "0.55027014" ]
0.7201921
0
Create a list of available symmetries
def list_syms(): SymStringVec=[]; SymStringVec.append("CSYM"); SymStringVec.append("DSYM"); SymStringVec.append("TET_SYM"); SymStringVec.append("OCT_SYM"); SymStringVec.append("ICOS_SYM"); SymStringVec.append("ISYM"); return SymStringVec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_symmetries(self):\n temp = self._properties.get('symmetries', [])\n return temp", "def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}", "def z2_symmetries(self) -> \"Z2Symmetries\":\n return self._z2_symmetries", "def get_symbols(self):\n\n raise NotImplementedError('''\n Must implement get_symbols. Call help() for details.\n ''')", "def get_symbols(**kwargs):\n return Symbols(**kwargs).fetch()", "def itersymbols(self):\n for syms in self._symbols.itervalues():\n for sym in syms:\n yield sym", "def get_symbol(self):\n return []", "def get_symbols_list(self):\n return self.symbols_list", "def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)", "def list_symbol_tables(mst):\n stlist = []\n def append_st(st):\n #print(st)\n stlist.append(st)\n for s in st.get_symbols():\n for ns in s.get_namespaces():\n append_st(ns)\n if not isinstance(mst, symtable.SymbolTable):\n # Assume it is text of a program to compile\n mst = symtable.symtable(mst, '<string>', 'exec')\n append_st(mst)\n return stlist", "def _create_symbol_mapping():\n normal_items = [\"+\", \"-\"]\n unicode_items = [chr(0x2070 + i) for i in range(10, 12)]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_items, unicode_items))", "def symbols(self):\n pass", "def __init__(self, symbols):\r\n self.symbols = set(symbols)", "def get_symbols(self, type_name):\n return self._symtab[type_name].get_symbols()", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def symbols(self) -> List[SingleMapping]:\n return self._symbols", "def getSymbols(self):\n return self.alpha.getSymbols()", "def lookup_option_symbols(self, underlying: str) -> List[Symbol]:\n url = \"/v1/markets/options/lookup\"\n params = {\"underlying\": underlying}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.symbols", "def __init__(self):\r\n self.s_table = SymbolTable.preSymbols", "def print_symbols():\n\n global program\n if program is None:\n print \"no program is loaded\"\n return\n for(s, a) in program.symbols():\n print \"0x{:x} : {}\".format(a, s)", "def getSymbolMap():\n name = os.path.join(os.path.dirname(__file__), 'nasdaq_nasdaqcom.csv')\n symbols = TickerSymbols(name)\n return symbols.getNameToTicker()", "def getSymmetries(self, board, pi):\n return [(board, pi), (board[:, ::-1], pi[::-1])]", "def get_symbols(self):\n symbols = os.environ.get('SYMBOLS', 'btc,eth')\n if not symbols:\n return 'btc,eth'\n return symbols", "def popSym(self):\r\n res=self.dbConn.execute(\"SELECT * FROM Symposia ORDER BY StartDate\").fetchall()\r\n self.symList = [ln[\"Name\"] for ln in res]\r\n self.symID_Title = {ln[\"SymposiumID\"]:ln[\"Name\"] for ln in res}\r\n for ln in res:\r\n self.symID_Title[ln[\"Name\"]] = ln[\"SymposiumID\"]", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def getSymbols(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> List[ghidra.program.model.symbol.Symbol]:\n ...", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def ionic_symbols(self) -> list[str]:\n return self.to_list().symbols", "async def get_symbols(self):\n uri = \"/v3/spot/symbols\"\n success, error = 
await self.request(\"GET\", uri)\n return success, error", "def getSymbolTable(self) -> ghidra.app.util.bin.format.pe.debug.DebugCodeViewSymbolTable:\n ..." ]
[ "0.7745224", "0.669557", "0.6515156", "0.63807356", "0.63562876", "0.6307514", "0.6297486", "0.6230556", "0.61550385", "0.6110371", "0.60140777", "0.600392", "0.58965176", "0.5893767", "0.5836222", "0.58326477", "0.5825521", "0.5808387", "0.57900697", "0.5775794", "0.57617795", "0.5722399", "0.5715164", "0.5712698", "0.56934786", "0.56902784", "0.5653735", "0.56444454", "0.56345713", "0.5575523" ]
0.69593567
1
Create a centered square (or cube) with edge length of d.
def model_square(d, nx, ny, nz=1): e = EMData() e.set_size(nx, ny, nz) e.process_inplace("testimage.squarecube", {"edge_length":d, "fill":1}) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)", "def initialize_d(d, square_sides, offset=0):\n return {key:[] for key in range(offset, square_sides ** 2 + offset)}", "def square_diamond(sx, sy, size, strong):\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = (self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] + dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)", 
"def make_square(x, size):\n return [ [x, -size/2, size/2],\n\t\t\t [x, size/2, size/2],\n [x, size/2, -size/2],\n\t\t\t [x, -size/2, -size/2]]", "def make_box_square(box, offset_scale=0.05):\n\n x_min, y_min, x_max, y_max = box[:4]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n\n if height >= width:\n half_box = height / 2.\n x_min = center_x - half_box\n x_max = center_x + half_box\n if width > height:\n half_box = width / 2.\n y_min = center_y - half_box\n y_max = center_y + half_box\n\n box_side_lenght = (x_max + x_min) / 2.\n offset = offset_scale * box_side_lenght\n x_min = x_min - offset\n x_max = x_max + offset\n y_min = y_min - offset\n y_max = y_max + offset\n return (int(x_min), int(y_min), int(x_max), int(y_max))", "def from_diag(d, context = FloatContext):\n n = len(d)\n S = zeros(n,n,context)\n set_diag(S,d)\n return S", "def plasm_cube(self, size=0.1, color=WHITE):\n return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))", "def bandcholesky(A, d):\n L, dg = LDL(A, d)\n return matrix(L)*diag(sqrt(dg))", "def get_sqd_from_center(boardsize):\n assert boardsize % 2 == 1\n center = boardsize // 2\n return distances_from_pt(sq_distance, (center, center), boardsize)", "def square(center, side_length, *args, **kwargs):\n center = np.asarray(center)\n side_length = float(side_length)\n lower_left = center - 0.5*side_length\n return patch.Rectangle(lower_left, side_length, side_length,\n \t\t\t\t\t *args, **kwargs)", "def __init__(\n self, d: int, seed: Optional[int] = None, inv_transform: bool = False\n ) -> None:\n self._d = d\n self._seed = seed\n self._inv_transform = inv_transform\n if inv_transform:\n sobol_dim = d\n else:\n # to apply Box-Muller, we need an even number of dimensions\n sobol_dim = 2 * math.ceil(d / 2)\n self._sobol_engine = SobolEngine(dimension=sobol_dim, scramble=True, seed=seed)", "def box(original, radius):\n batches = original.size()[0]\n num_elem = h.product(original.size()[1:])\n ei = h.getEi(batches,num_elem)\n \n if len(original.size()) > 2:\n ei = ei.contiguous().view(num_elem, *original.size())\n\n return HBox(original, None, ei * radius).checkSizes()", "def draw_square(square_edge):\n\n # create a square filled with zeros with square_edge as size and int as data type\n square = numpy.zeros((square_edge, square_edge), dtype=int)\n\n # square is a \"matix\" of N (square_edge) list\n # we use list index to place the numbers\n current_list = 0\n list_index = square_edge // 2\n\n # magic square logic\n for number in range(1, square_edge**2+1, 1):\n\n # we place our first number\n square[current_list, list_index] = number\n\n # we update how the next number will be placed in the square\n update_current_list = (current_list - 1) % square_edge\n update_list_index = (list_index + 1) % square_edge\n\n # if value of the index are not 0 we will increment current_list\n if square[update_current_list, update_list_index]:\n current_list += 1\n\n # else we simply update the index\n else:\n current_list = update_current_list\n list_index = update_list_index\n\n return square", "def get_box(x_tr, y_tr, d=1.):\n xs = np.array([-1., 1., 1., -1., -1.])\n ys = np.array([-1., -1., 1., 1., -1.])\n\n xs = xs*d/2 + x_tr\n ys = ys*d/2 + y_tr\n\n return xs, ys", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h", "def sqft(d):\n return 2 * (d.l*d.w + d.l*d.h + d.h*d.w)", "def sqrtCF(d):\n sqrtD 
= sqrt(d)\n P = 0\n Q = 1\n while True:\n a = int(floor((P + sqrtD) / Q))\n yield a\n P = a * Q - P\n Q = (d - P*P) // Q # It can be shown that Q evenly divides d - P*P", "def C(width = 1, size = (10, 20), layer = 0):\n D = Device(name = 'C')\n w = width/2\n s1, s2 = size\n points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2-w),\n (s1, s2-w), (s1, s2+w), (-w, s2+w), (-w, -w)]\n D.add_polygon(points, layer = layer)\n D.add_port(name = 1, midpoint = (s1, s2), width = width, orientation = 0)\n D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0)\n return D", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def box(original, diameter):\n return Box(original, h.ones(original.size()) * diameter, None).checkSizes()", "def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):\n return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)", "def sample(self, d):\n dist = rnd.uniform(0,self.length)\n w = rnd.normal(0,self.width)\n d.pos = np.dot(rotmat(self.angle), [dist, w]) + self.pos\n d.ownpos = self.pos", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def sqnorm(self, d):\n ###TODO\n total = 0.0\n for i in d:\n total = total + (d[i] * d[i])\n return total", "def coordinates_of_square(crd):\n col = ord(crd[0]) - ord('a')\n row = int(crd[1]) - 1\n return (col * SQUARE_EDGE + BOARD_MARGIN, (7 - row) * SQUARE_EDGE + BOARD_MARGIN)", "def __init__(self, d):\n\t\tself._coords = [0] * d", "def diagonal(self):\r\n return math.sqrt((self.width ** 2) + (self.height ** 2))", "def propagateInBox((x0,y0,z0), (px,py,pz), d):\n x = x0 + px*d\n y = y0 + py*d\n z = z0 + pz*d\n\n return (x,y,z)", "def diagonal(cube_edge: int=128,\n radius: int=10,\n foreground: int=1,\n dtype=np.uint8):\n if 2 * radius > cube_edge:\n raise ValueError(\"Given radius '{}' is larger than than cube edge length {}\"\n .format(radius, cube_edge))\n stack = np.zeros((cube_edge, cube_edge, cube_edge), dtype=bool)\n cylinder = [\n ((0, 0, 0), (cube_edge - 1, cube_edge - 1, cube_edge - 1), radius)\n ]\n stack = add_cylinder_px(stack, *cylinder[0])\n return volume_bool_to_dtype(stack, fg=foreground, dtype=dtype)", "def central_composite(d,center=(2,2),alpha='o',face='ccc'):\n \n return build_central_composite(d,center=center,alpha=alpha,face=face)" ]
[ "0.5763527", "0.5631988", "0.55919534", "0.5325949", "0.52789533", "0.52779573", "0.5240144", "0.5220922", "0.5193584", "0.5174843", "0.5162735", "0.5129172", "0.5129167", "0.51191884", "0.5118387", "0.51183224", "0.50997084", "0.50534856", "0.50478446", "0.50203365", "0.4996185", "0.49504822", "0.49483687", "0.49390522", "0.49352452", "0.4905792", "0.49013817", "0.4879734", "0.4877485", "0.4870109" ]
0.63994324
0
Parse a Spider filename string and insert parameters.
def parse_spider_fname(mystr, *fieldvals): # helper functions and classes def rm_stack_char(mystr): "Helper function to remove a stack character if it exists" stackloc = mystr.find("@") if stackloc != -1: # there's an '@' somewhere if len(mystr) - 1 == stackloc: # It's at the end of the string return mystr[:-1] else: # '@' not at the end, so it's an error raise ValueError, "Invalid format: misplaced '@'." else: # no '@' at all return mystr class Fieldloc: "Helper class to store description of a field" def __init__(self, begin, end): self.begin = begin self.end = end def count(self): "Size of the field (including braces)" return self.end - self.begin + 1 def find_fields(mystr): "Helper function to identify and validate fields in a string" fields = [] loc = 0 while True: begin = mystr.find('{', loc) if begin == -1: break end = mystr.find('}', begin) field = Fieldloc(begin, end) # check validity asterisks = mystr[begin+1:end] if asterisks.strip("*") != "": raise ValueError, "Malformed {*...*} field: %s" % \ mystr[begin:end+1] fields.append(Fieldloc(begin, end)) loc = end return fields # remove leading whitespace mystr.strip() # remove stack character (if it exists) mystr = rm_stack_char(mystr) # locate fields to replace fields = find_fields(mystr) if len(fields) != len(fieldvals): # wrong number of fields? raise ValueError, "Number of field values provided differs from" \ "the number of {*...*} fields." newstrfrags = [] loc = 0 for i, field in enumerate(fields): # text before the field newstrfrags.append(mystr[loc:field.begin]) # replace the field with the field value fieldsize = field.count() - 2 fielddesc = "%0" + str(fieldsize) + "d" newstrfrags.append(fielddesc % fieldvals[i]) loc = field.end + 1 newstrfrags.append(mystr[loc:]) return "".join(newstrfrags)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_filename(cls, filename):\n words = filename.split('_')\n return words[0], int(words[1][1:]), int(words[2])", "def parseFileName(filename):\n entry = DataEntry(\"\",0,{},{},0,0)\n wordArray = filename.split(\".\")\n entry.publication_name = wordArray[1]\n entry.year = wordArray[0]\n return entry", "def parse(source, *, filename=\"[STRING]\", typecheck=True):\n return _Parser(filename, source, typecheck).parse()", "def parseFilePath(self, filepath):\n\n li = filepath.split(\"/\") \n last = li[-1].split(\"_\")\n\n self.subjectName = li[-2]\n self.experimenterName = li[-3]\n self.experimentDate = last[-1]\n self.paradigm = last[-2]\n self.subjectName = last[-3]", "def parse(filename):\n file_map = {\n '1995-1996.html': ninety_six,\n '2005-2006.html': twenty_six,\n '2014-2015.html': twenty_fifteen\n }\n func = file_map.get(filename, lambda: \"Invalid File\")\n func(filename)", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def ParseFileName(self, fn, fnParser):\n self.filename = fn\n attrs = fnParser.Parse(fn)\n [setattr(self, k, _TryNumeric(v)) for k, v in attrs]", "def parse_glider_filename(filename):\n head, tail = os.path.split(filename)\n\n matches = re.search(r\"([\\w\\d\\-]+)-(\\d+)-(\\d+)-(\\d+)-(\\d+)\\.(\\w+)$\", tail)\n\n if matches is not None:\n return {\n 'path': head,\n 'glider': matches.group(1),\n 'year': int(matches.group(2)),\n 'day': int(matches.group(3)),\n 'mission': int(matches.group(4)),\n 'segment': int(matches.group(5)),\n 'type': matches.group(6)\n }\n else:\n raise ValueError(\n \"Filename ({}) not in usual glider format: \"\n \"<glider name>-<year>-<julian day>-\"\n \"<mission>-<segment>.<extenstion>\".format(filename)\n )", "def main():\n parse_file(sys.argv[1])", "def parse_filename(filename): # , time_fmt=TIME_INFILE_FMT):\n # Split the name up into its \"blocks\"\n parts = filename.split(\"_\")\n hive_str, rpi_str = parts[1:3]\n day_str = parts[3]\n method = parts[5]\n\n # Parse Hive and RPi number\n hive = int(hive_str[-1])\n rpi = int(rpi_str[-1])\n method = method.strip(\".csv\")\n\n # # Parse timestring into a datetime object\n # dt_naive = datetime.strptime(t_str, time_fmt)\n # dt_utc = pytz.utc.localize(dt_naive)\n\n return hive, rpi, method, day_str", "def _parse(self, infile):\n raise NotImplementedError()", "def parse(self, infile):\r\n raise NotImplementedError()", "def init_from_string(self, fs_in, param_string):\n if '(' in param_string:\n name_params_re = re.compile(r'(\\w*)\\((.*)\\)$')\n pieces = name_params_re.match(param_string)\n name = pieces.group(1)\n params = pieces.group(2)\n param_list = params.split(';')\n param_dict = {}\n for param in param_list:\n if '=' not in param:\n raise ValueError('preprocess param %s missing a value.' 
% param)\n k, v = param.split('=', 1)\n if v.isdigit():\n v = int(v)\n else:\n try:\n v = float(v)\n except ValueError:\n pass\n param_dict[k] = v\n self._name = name\n self.init_highpass(param_dict['highpass_cutoff'],\n param_dict['highpass_order'])\n self.init_channel_numbers(param_dict['channel_numbers'])\n else:\n self.__init__(self, fs_in, param_string)", "def insert(self, file_token, *file_tokens, preprocessor):\n tokens = (file_token,) + file_tokens\n for token in tokens:\n preprocessor.insert_file(self._get_filename(token))", "def _parse_file_path(self, input_path):\n pass", "def parse_parameters(parser):\n parser.add_argument('--parameters-file', '-p', help='parameter filename',\n required=True)\n return parser", "def preprocess(self, source, name, filename=None):\n if not name or not os.path.splitext(name)[1] in self.environment.file_extensions:\n return source\n output = StringIO()\n lexer = Lexer(iter(source.splitlines()))\n Parser(lexer, callback=output.write, debug=self.environment.slim_debug).parse()\n\n if self.environment.slim_print:\n print output.getvalue()\n\n return output.getvalue()", "def __init__(self, filename=None, label=None, tokens=None):\n if label: # specify from label/tokens, for testing.\n self.label = label\n self.tokens = tokens\n self.postID = -1\n self.likes = -1\n else: # specify from file.\n self.filename = filename\n parsedNames = filename.split(\"#\")\n if 'pop' in parsedNames[0]:\n self.label = 'pop'\n else:\n self.label = 'sod'\n self.postID = parsedNames[1]\n self.likes = parsedNames[2]\n self.tokenize()", "def init_from_file(filename, parser=int):\n filename = filename + \".\" + str(PID)\n\n def __parser_couple(s):\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n ss = s.split(\",\")\n return int(ss[0]), int(ss[1])\n\n p = PTree()\n content = SList([])\n with open(filename, \"r\") as f:\n count_line = 0\n for line in f:\n if line.strip()[0] == '#':\n continue\n # __distribution: PID -> nb of segments\n # __global_index: num seg -> (start, offset)\n if count_line == 0: # Get the distribution\n p.distribution = SList.from_str(line)\n p.start_index = p.distribution.scanl(lambda x, y: x + y, 0)[PID]\n p.nb_segs = p.distribution[PID]\n elif count_line == 1: # Get the global_index\n p.global_index = SList.from_str(line, parser=__parser_couple)\n else: # Get the content\n content.extend(Segment.from_str(line, parser=parser))\n count_line = count_line + 1\n p.content = content\n return p", "def setSourceFile(filename):", "def __init__(self, line_parser, *filename):\n \n self.line_parser = line_parser\n self.f = fileinput.input(filename)", "def _parse(\n self, source: str, name: t.Optional[str], filename: t.Optional[str]\n ) -> nodes.Template:\n return Parser(self, source, name, filename).parse()", "def parse(self, fstring):\n pass", "def parse_pts(pts_result_file, global_var_list, parsed_results_file):\n fill_value_name(pts_result_file, global_var_list, parsed_results_file)", "def parse_filenames(filenames):\n \n for fn in filenames:\n dirname, basename = path.split(fn)\n subject_visit = basename[:7]\n visit = basename[5:7]\n yield dirname, basename, subject_visit, visit", "def parse_parameters(filename):\n\n # read in the parameters\n mainInput = ParserClass.Parser(filename)\n if 'LogFile' in mainInput['Inputs']:\n if mainInput['Inputs']['LogFileUsePID']:\n logger = Logging.Logger(mainInput['Inputs']['LogFile']+'_{}'.format(os.getpid()))\n else:\n logger = Logging.Logger(mainInput['Inputs']['LogFile'])\n \n else:\n logger = print\n\n # 
Generate a filelist to loop over\n filelist = np.loadtxt(mainInput['Inputs']['filelist'],dtype=str,ndmin=1)\n if isinstance(mainInput['Inputs']['data_dir'], type(None)):\n filelist = [filename for filename in filelist]\n else:\n filelist = ['{}/{}'.format(mainInput['Inputs']['data_dir'],\n filename.split('/')[-1]) for filename in filelist]\n \n # Some items should always be a list\n if not isinstance(mainInput['Inputs']['pipeline'], list):\n mainInput['Inputs']['pipeline'] = [mainInput['Inputs']['pipeline']]\n # Get the class names (modulename, classname)\n jobnames = [c for c in mainInput['Inputs']['pipeline']]\n\n logger('Running: '+' '.join(mainInput['Inputs']['pipeline']))\n\n\n prejobnames = [c for c in mainInput['Inputs']['preamble']]\n\n\n # Read the class parameter file\n classInput = ParserClass.Parser(mainInput['Inputs']['classParameters'])\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n jobs = []\n for job in jobnames:\n jobs += [getClass(job)(logger=logger,**classInput[job])]\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n prejobs = []\n for prejob in prejobnames:\n prejobs += [getClass(prejob)(logger=logger,**classInput[prejob])]\n\n\n return jobs,prejobs, filelist, mainInput, classInput, logger", "def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')", "def parse_file_name(file_name):\n\n elements = file_name.split(\"_\")\n if file_name.find(\"_VI_\") > 0:\n client = elements[0]\n capture_range = \"R1\"\n condition = elements[2]\n polarization = \"VIS\"\n shot = elements[4]\n modality = \"VIS\"\n else:\n client = elements[0]\n capture_range = elements[1]\n condition = elements[2]\n polarization = elements[3]\n shot = elements[4]\n modality = \"THERMAL\"\n \n return client, capture_range, condition, polarization, shot, modality", "def parse_infile(self, infile):\n\n if type(infile)==str:\n print('Im a string')\n folder, file = os.path.split(infile)\n elif type(infile) in [list, tuple]:\n if not len(infile) == 2:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n else:\n folder, file = infile\n else:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n \n self.folder = folder\n self.file_ = file", "def parser(filename):\n\n regex = re.compile(\n # prolog\n r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info" ]
[ "0.5459443", "0.5448573", "0.5429191", "0.52798533", "0.5271916", "0.5247543", "0.52416116", "0.5215298", "0.51574737", "0.5120585", "0.5072528", "0.5036985", "0.5022814", "0.4980182", "0.49415386", "0.4940157", "0.49290508", "0.49222475", "0.49006563", "0.48977634", "0.48940113", "0.48884514", "0.48853", "0.48508862", "0.4831189", "0.4828288", "0.48280442", "0.48250598", "0.47992033", "0.47972468" ]
0.5895396
0
Print the data in slice iz, row ix of an image to standard out.
def print_row(input, ix=0, iz=0): image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() print "(z = %d slice, x = %d row)" % (iz, ix) line = [] for iy in xrange(ny): line.append("%12.5g " % (image.get_value_at(ix,iy,iz))) if ((iy + 1) % 5 == 0): line.append("\n ") line.append("\n") print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = 
image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % 
(dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printImage(imageObject):\n # TODO\n pass", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def print_info(self, i):\n\n 
im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)" ]
[ "0.75990057", "0.7544907", "0.74450433", "0.73937047", "0.7349031", "0.7347187", "0.7033798", "0.68403655", "0.6811467", "0.66571254", "0.6630757", "0.6279358", "0.6275334", "0.61232245", "0.60401046", "0.5973948", "0.5973784", "0.5965042", "0.59583265", "0.59162855", "0.5894761", "0.5819436", "0.5811356", "0.58048147", "0.57793635", "0.57645047", "0.5754731", "0.57335156", "0.57335156", "0.5726376" ]
0.76172984
0
Print the data in slice iz, column iy of an image to standard out.
def print_col(input, iy=0, iz=0): image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() print "(z = %d slice, y = %d col)" % (iz, iy) line = [] for ix in xrange(nx): line.append("%12.5g " % (image.get_value_at(ix,iy,iz))) if ((ix + 1) % 5 == 0): line.append("\n ") line.append("\n") print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): 
line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: 
{}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)" ]
[ "0.77487314", "0.750461", "0.7496404", "0.73919225", "0.7150529", "0.7141496", "0.7027633", "0.6667874", "0.66633767", "0.6654244", "0.6348636", "0.621092", "0.6179944", "0.60063565", "0.59464717", "0.59386533", "0.5911687", "0.58938134", "0.5851181", "0.58338124", "0.58030814", "0.580005", "0.5770217", "0.57680136", "0.57322097", "0.5687016", "0.56508046", "0.56508046", "0.56486964", "0.5631367" ]
0.75519806
1
Print the data in slice iz of an image to standard out.
def print_slice(input, iz=0): image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() print "(z = %d slice)" % (iz) line = [] for iy in xrange(ny): line.append("Row ") line.append("%4i " % iy) for ix in xrange(nx): line.append("%12.5g " % (image.get_value_at(ix,iy,iz))) if ((ix + 1) % 5 == 0): line.append("\n ") line.append(" ") line.append("\n") if(nx%5 != 0): line.append("\n") print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint 
\"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def printImage(imageObject):\n # TODO\n pass", "def 
pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")" ]
[ "0.7832976", "0.7438628", "0.72238773", "0.70643157", "0.70205015", "0.6971101", "0.659192", "0.65685576", "0.6525086", "0.64007616", "0.6349104", "0.6105353", "0.6058925", "0.6016299", "0.60077834", "0.59170806", "0.5827092", "0.5820637", "0.5805464", "0.58032835", "0.57411206", "0.5733659", "0.57312804", "0.56618387", "0.56552017", "0.55842036", "0.5539581", "0.5538914", "0.55242884", "0.5521108" ]
0.7674774
1
Print the data in slice iz of an image to standard out in a format that agrees with v2
def print_image_slice(input, iz=0):
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	for iy in xrange(ny-1,-1,-1):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n ")
				line.append(" ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint 
\"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def printMat(image):\n for row in range(image.rows):\n print 
\"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def printImage(imageObject):\n # TODO\n pass", "def __writeImageBytes(self, image):\n\n if not image:\n raise Exception(\"image not found\")\n result = []\n for i, b in enumerate(image):\n if i % 39 == 0:\n result.append(\"\\n\")\n result.append(f\"{b:02X}\")\n return \"\".join(result)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")" ]
[ "0.7314874", "0.72975045", "0.7103524", "0.7012286", "0.6931774", "0.67332554", "0.6709795", "0.6601998", "0.6534724", "0.6502416", "0.6426161", "0.6281741", "0.6222216", "0.6221346", "0.6040175", "0.60378903", "0.59890413", "0.594744", "0.5930997", "0.5928868", "0.5903941", "0.59003437", "0.5883875", "0.5843126", "0.5829642", "0.5817241", "0.57438475", "0.5733415", "0.5705191", "0.56490237" ]
0.7488905
0
Read data from text file. If ncol = -1, read all columns; if ncol >= 0, just read the (ncol+1)-th column.
def read_text_file(file_name, ncol = 0):
	from string import split
	inf = file(file_name, "r")
	line = inf.readline()
	data = []
	while len(line) > 0:
		if ncol == -1:
			vdata = split(line)
			if data == []:
				for i in xrange(len(vdata)):
					data.append([float(vdata[i])])
			else:
				for i in xrange(len(vdata)):
					data[i].append(float(vdata[i]))
		else:
			vdata = float(split(line)[ncol])
			data.append(vdata)
		line = inf.readline()
	return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readOFColumnData(dataFile,nCol):\n fileCheck(dataFile) # does the file exists ? Stop if not.\n #\n # Init list\n data = []\n #\n for line in fileinput.input(dataFile):\n # remove parenthesis if any\n line = line.replace('(', '')\n line = line.replace(')', '') \n # divide each element of the line into words\n words = line.split()\n if words: # if there is a line in fact\n if words[0][0]!='#': #do something only if not comment \n data.append(float(words[nCol])) \n # \n return data", "def load_n_col(file):\n df = pd.read_csv(file, delimiter=\" \", header=None)\n columns = [list(df[col]) for col in df]\n return columns", "def readLines(filename, col=None):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n lines = [ s.rstrip(\"\\n\\r\") for s in lines ]\n if col == None:\n return lines\n else:\n return [ s.split(\"\\t\")[col] for s in lines ]", "def read_column(file_name, column_number):\n flist = []\n empty_lines = 0\n fread = open(file_name,'r')\n for line in fread:\n chompedLine = line.rstrip()\n if not chompedLine:\n empty_lines += 1\n continue\n flist.append(float(chompedLine.split()[column_number-1]))\n\n return flist", "def read_data_6_columns(filename=\"ripple_082-085.dat\", skip=1):\n fileobj = open(filename, 'r')\n # ignore the first skip lines\n for i in range(skip):\n fileobj.readline()\n h = []; k = []; qr =[]; qz =[]; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n hval, kval, rval, zval, qval, Fval = line.split()\n h.append(int(hval)) \n k.append(int(kval))\n qr.append(float(rval))\n qz.append(float(zval))\n q.append(float(qval))\n F.append(float(Fval)) \n return h, k, qr, qz, q, F", "def readFile(fname):\n\n fromto = []\n cols = []\n with open(fname , 'r') as f:\n cols = f.readline().split(\",\")[0:4] # Headline\n for line in f:\n tm, frm, to, am = line.split(\",\")[0:4]\n frm = int(frm.lstrip())\n to = int(to.lstrip())\n fromto.append((frm,to))\n return cols, fromto", "def read_file_lines(filename, cols, skip=0, stop=-1, column_major=False, separator='[\\t ]'):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [re.split(separator, l.strip()) for l in lines]]\n return np.transpose(res) if column_major else res", "def read_file(infile,column_num):\n\n \n column_list = []\n\n with open(infile,'r') as f:\n\n fl = f.readlines()\n\n for line in fl:\n \n \n value = int(line.split()[int(column_num)-1])\n column_list.append(value)\n\n\n return column_list", "def read_slurm_file(filename, cols, skip=54, stop=-34, column_major=True):\n\n # Set current directory\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n # Open file\n f = open(__location__ + '/' + filename, \"r\")\n\n # Read lines and skip initial lines if necessary\n lines = f.readlines()[skip:stop]\n\n # Select columns\n res = [[np.float64(line[col]) for col in cols] for line in [l.split() for l in lines]]\n return np.transpose(res) if column_major else res", "def get_data(file,cols=0,nrows='all'):\n if type(cols)==type(0):\n cols=(cols,)\n nvar=1\n else: nvar=len(cols)\n data=get_str(file,cols,nrows)\n if nvar==1: return array(list(map(float,data)))\n else:\n data=list(data)\n for j in range(nvar): data[j]=array(list(map(float,data[j])))\n return 
tuple(data)", "def djs_readcol(name,**kwargs):\n import re\n import numpy as np\n #\n # Number of lines\n #\n try:\n f = open(name,'r')\n except IOError:\n return None\n lines = f.readlines()\n f.close()\n nlines = len(lines)\n if 'silent' in kwargs:\n silent = True\n else:\n silent = False\n if 'debug' in kwargs:\n debug = True\n else:\n debug = False\n if debug:\n print(\"{0} contains {1} lines.\".format(name, nlines))\n if 'skip' in kwargs:\n skip = kwargs['skip']\n else:\n skip = 0\n nlines -= skip\n if 'numline' in kwargs:\n nlines = min(kwargs['numline'],nlines)\n #\n # Get the number of columns from the first non-skipped line\n #\n k = skip\n while lines[k][0] == '#':\n k += 1\n whitespace = re.compile(r'\\s+')\n baseline = lines[k].strip().replace(',',' ')\n basecols = whitespace.split(baseline)\n ncol = len(basecols)\n if 'format' in kwargs:\n if re.match(r'^\\(?[ABDFILX, ]+\\)?$',kwargs['format'],re.IGNORECASE) is None:\n print(\"Invalid format string!\")\n return None\n format = kwargs['format'].replace(' ','').upper().lstrip('(').rstrip(')').split(',')\n saveformat = [f for f in format if f != 'X']\n if len(format) < ncol:\n if not silent:\n print('Format string has fewer columns than the file.')\n ncol = len(format)\n else:\n #\n # Assume all floating point format\n #\n format = list('F'*ncol)\n saveformat = format\n if debug:\n print(','.join(format))\n nread = 0\n goodlist = list()\n for l in lines[skip:nlines]:\n nread += 1\n if debug:\n print(l)\n if len(l) < ncol or l[0] == '#':\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n #\n # Split the line\n #\n cols = whitespace.split(l.strip().replace(',',' '))\n savecols = [cols[k] for k in range(ncol) if format[k] != 'X']\n savelist = list()\n if len(savecols) == len(saveformat):\n for k in range(len(saveformat)):\n if saveformat[k] == 'A':\n #\n # Save strings as is.\n #\n saved = savecols[k]\n elif saveformat[k] == 'B' or saveformat[k] == 'I' or saveformat[k] == 'L':\n try:\n saved = int(savecols[k])\n except ValueError:\n #\n # Error, bad format, skip this line\n #\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n elif saveformat[k] == 'F' or saveformat[k] == 'D':\n try:\n saved = float(savecols[k])\n except ValueError:\n #\n # Error, bad format, skip this line\n #\n if not silent:\n print('Skipping line {0}'.format(skip+nread+1))\n continue\n else:\n print(\"Whoops, bad format! 
How did that happen?\")\n continue\n savelist.append(saved)\n if len(savelist) != len(saveformat):\n if not silent:\n print(\"Skipping line {0}\".format(skip+nread+1))\n else:\n #\n # Error, not enough columns\n #\n if not silent:\n print(\"Skipping line {0}\".format(skip+nread+1))\n continue\n goodlist.append(savelist)\n if len(goodlist) == 0:\n raise IOError('No valid lines found for specified format')\n if not silent:\n print(\"{0} valid lines read.\".format(len(goodlist)))\n #\n # Zip the good list\n #\n goodcols = zip(*goodlist)\n #\n # Convert the columns to pylab arrays\n #\n dtypes = { 'A':'S','B':'b','I':'i2','L':'i4','K':'i8','F':'f','D':'d' }\n converted = [np.array(goodcols[k],dtype=dtypes[saveformat[k]])\n for k in range(len(saveformat))]\n return tuple(converted)", "def read_data_4_columns(filename=\"ripple_082-085.dat\"):\n # Process comment and header lines\n fileobj = open(filename, 'r')\n while True:\n s = fileobj.readline()\n if s.startswith('#'):\n print(s)\n continue\n elif s.startswith('h'):\n break\n else:\n print(\"Any comments (including an empty line) should start with #.\")\n print(\"Please fix your input file.\")\n sys.exit(1)\n \n # Go through data points \n h = []; k = []; q = []; F = []\n lines = fileobj.readlines()\n for line in lines:\n # This ignores an empty line\n line = line.rstrip()\n if not line: \n continue\n hval, kval, qval, Fval = line.split()\n h.append(int(hval))\n k.append(int(kval)) \n q.append(float(qval))\n F.append(float(Fval))\n return h, k, q, F", "def read_file(file_name, nrows=None):\n try:\n file_handle = open(file_name)\n except PermissionError as err:\n print('File IO error: ', err, file=STDE)\n else:\n return pandas.read_table(\n file_handle, nrows=nrows, low_memory=False,\n na_values=['NA', '.']\n )", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def read_txt(self, widths=[3, 21, 4, 6, 4, 6, 12, 12]):\n cols = ['ID', 'SSSSSSSS.mmmuuun', 'AMP', 'THR', 'A-FRQ', 'R-FRQ', 'SIG STRNGTH', 'ABS-ENERGY']\n\n widths = widths\n self.data = pd.read_fwf(self.data_file, widths=widths, header=None, skiprows=self.skip_rows)\n self.data.columns = cols\n\n self.data = self.data.loc[self.data['ID'] == 1]\n self.skip_rows += len(self.data)", "def read_data(file):\n\n f = open(file, mode='r')\n\n data = f.read().split('\\n')\n\n # Pop trailing end\n while data[-1] == \"\":\n data.pop()\n\n q = data[::2]\n a = data[1::2]\n f.close()\n\n return q, a", "def textread(filepath):\n return np.array(pd.read_csv(filepath, \n sep = \"\\s+|\\t+|\\s+\\t+|\\t+\\s+\",\n header=None,\n comment='#',\n engine='python'))", "def read_data_file(input_file):\n header_lines = 0\n last_pound_pos = -1\n with open(input_file, 'r') as data_file:\n while (data_file.read(1) == '#'):\n last_pound_pos = data_file.tell()\n header = data_file.readline()\n header_lines += 1\n\n #Read the next lines\n data_1 = data_file.readline().split()\n data_2 = data_file.readline().split()\n data_file.seek(last_pound_pos + 1) #Goes back to the last line of the header\n\n if header_lines == 0:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n\n else:\n # Single line file\n if len(data_2) == 0:\n 
data_file.readline()\n\n else:\n\n if len(data_1) != len(\n data_2): #If there is a line containing the number of particles,\n data_file.readline()\n data_file.readline()\n\n try:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n data.columns = header.split()\n except:\n raise Exception(\"The input file '%s' is corrupted, usually the problem is because \"\\\n \"there is an end of a line that has an additional space\" %input_file)\n\n return data", "def read_col(self, colname):\n self.open_msfile()\n data = self.tb.getcol(colname)\n self.close_msfile()\n return data", "def readMatrix(file):\n file1 = open(file, \"r\")\n rawData = file1.readlines()\n file1.close() \n \n n = round(len(rawData[0])/2) \n \n matrix2D = [[None for x in range(n)] for y in range(n)] \n \n j = 0\n for line in rawData: \n i = 0 \n for element in line:\n if element != \" \":\n if i == n:\n break\n matrix2D[j][i] = element\n i+= 1 \n j+= 1 \n \n return matrix2D", "def loadtxt_fast(filename, dtype=np.int, skiprows=0, delimiter=' '):\n def iter_func():\n with open(filename, 'r') as infile:\n for _ in range(skiprows):\n next(infile)\n skip = 0\n for line in infile:\n line = line.strip().split(delimiter)\n for item in line:\n yield dtype(item)\n loadtxt_fast.rowlength = len(line)\n data = np.fromiter(iter_func(), dtype=dtype)\n data = data.reshape((-1, loadtxt_fast.rowlength))\n return data", "def read_data(fname, cols):\n df = (pd.read_csv(fname, header=None, sep=r\"\\s+\", comment=\"#\",\n names=cols, dtype=np.float64)\n .iloc[1:]) # First line is the total number of trees\n # Could reset_index, but we don't shuffle the DataFrame\n return df", "def read_users(file_name):\n f = open(file_name, \"r\")\n header = f.readline()\n f.close()\n cols = [x.strip(\"\\\"\\n\") for x in header.split('\\t')]\n return cols[12:]", "def get_str(file,cols=0,nrows='all'):\n if type(cols)==type(0):\n cols=(cols,)\n nvar=1\n else: nvar=len(cols)\n lista=[]\n for i in range(nvar): lista.append([])\n buffer=open(file).readlines() \n if nrows=='all': nrows=len(buffer)\n counter=0\n for lines in buffer:\n if counter>=nrows : break\n if lines[0]=='#': continue\n pieces=lines.split()\n if len(pieces)==0: continue\n for j in range(nvar):lista[j].append(pieces[cols[j]])\n counter=counter+1\n if nvar==1: return lista[0]\n else: return tuple(lista)", "def readFile(filename):\n df = pd.read_csv(filename, header=0) # read the file\n return df.iloc[:,:].values", "def __readData(self, f, nRows, nCols):\n # Efficiently allocate all the memory we'll need.\n data = numpy.empty( (nCols, nRows), float )\n\n # Import data from the LFM Solar Wind file\n rowIndex = 0\n for row in f.readlines():\n if len(row.split()) != nCols: continue\n\n for col, field in enumerate(row.split()):\n data[col, rowIndex] = field\n\n rowIndex += 1\n\n # Bad things can happen if the file header says there is more\n # (or less) data than there actually is within the file!\n assert(rowIndex == nRows)\n\n return data", "def read_column(path=None, into=list, linebreak=\"\\n\", lstrip=True, rstrip=True, compression=\"infer\", sheet_name=0, astype=str, exclude=(\"nan\")):\n if path is None:\n from pandas.io.clipboard import clipboard_get\n text = clipboard_get()\n else:\n if path.endswith((\".xls\", \"xlsx\")):\n text = linebreak.join(map(str, read_dataframe(path, sheet_name=sheet_name).index))\n else:\n with get_file_object(path, mode=\"read\", compression=compression, safe_mode=False, verbose=False) as f:\n text = f.read()\n \n elements = list()\n for 
element in text.split(linebreak):\n if lstrip:\n if isinstance(lstrip, str):\n element = element.lstrip(lstrip)\n else:\n element = element.lstrip()\n if rstrip:\n if isinstance(rstrip, str):\n element = element.rstrip(rstrip)\n else:\n element = element.rstrip()\n if bool(element):\n if element not in exclude:\n element = astype(element)\n elements.append(element)\n return into(elements)", "def method4(fname):\n\t#jfrom cStringIO import StringIO\n\t#from tokenize import generate_tokens\n\timport re\n\tprint \"Method 4: read in files by line\"\n\tprint \"and rather than printing out all of it, only print out specific cols \"\n\tf = open(fname,\"r\")\n\tline = f.readline()\n\ti = 0 \n\t\n\twhile line != '':\n\t\ttmp= line.strip()\n\t\tif tmp :\n\t\t\t#print tmp\n\t\t\t#tmp = line.strip()\n\t\t\ttmpp = tmp.split()\n\t\t\t#i +=1\n\t\t\t#print len(tmpp)\n\t\t\tif len(tmpp) >1:\n\t\t\t\tprint tmpp[1]\n\t\t#tmp = line.split(' ')\n\t\t#i += 1\n\t\t#tmp = 'sdklsd sdjlks '\n\t\t#print len(tmp)\n\t\t#if len(tmp) > 1: \n\t\t\t#print tmp[1]\n\t\tline=f.readline()\n\t\n\tf.close()\n\tprint \"Method 4 done\"", "def read_data(filename):\n f = open(filename, \"r\")\n line = f.readline()\n t, n, m, s, population = line.split()\n line = f.readline()\n board = []\n paths = []\n i = 0\n while line:\n if i < int(n):\n board.append([int(x) for x in line if x != '\\n'])\n else:\n paths.append(line if '\\n' not in line else line[:len(line) - 2])\n line = f.readline()\n i += 1\n return int(t), int(n), int(m), int(s), int(population), paths, np.array(board)", "def load_input(self, number_of_rows_to_read):\n self.dataframe = pandas.read_csv(self.filename, nrows=number_of_rows_to_read)\n #self._describe_input_data()" ]
[ "0.69048285", "0.6310644", "0.6242134", "0.61760336", "0.6040689", "0.60290545", "0.6026688", "0.60042155", "0.587215", "0.58236593", "0.5762683", "0.5731496", "0.5718888", "0.5706086", "0.5696076", "0.5676739", "0.56515324", "0.56390595", "0.56192374", "0.55811644", "0.55722106", "0.5538924", "0.55220044", "0.54926354", "0.5447086", "0.5438206", "0.5424468", "0.5422595", "0.5421211", "0.5408198" ]
0.73864484
0
Linearly interpolate a 1D power spectrum to required length with required pixel size. input_object - a 1D list with a 1D curve to be interpolated; length_current - half size of the image size (in case of power spectrum, it can be different from the length of the input_object); length_interpolated - length of the interpolated 1D curve; Pixel_size_current - pixel size of the input 1D list; Pixel_size_interpolated - pixel size of the target 1D list. One can either input the two lengths or two respective pixel sizes.
def reshape_1d(input_object, length_current=0, length_interpolated=0, Pixel_size_current = 0., Pixel_size_interpolated = 0.):
	interpolated = []
	if length_current == 0: length_current = len(input_object)
	lt = len(input_object) - 2
	if length_interpolated == 0:
		if( Pixel_size_interpolated != Pixel_size_current):
			length_interpolated = int(length_current*Pixel_size_current/Pixel_size_interpolated + 0.5)
		else:
			ERROR("Incorrect input parameters","reshape_1d",1)
			return []
	if Pixel_size_current == 0.:
		Pixel_size_current = 1.
		Pixel_size_interpolated = Pixel_size_current*float(length_current)/float(length_interpolated)
	qt = Pixel_size_interpolated/Pixel_size_current
	for i in xrange(length_interpolated):
		xi = float(i)*qt
		ix = min(int(xi), lt)
		df = xi - ix
		xval = (1.0-df)*input_object[ix] + df*input_object[ix+1]
		interpolated.append(xval)
	return interpolated
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate(signal, new_length):\n assert len(signal) > 1 and len(signal[0]) > 1\n current_length = len(signal)\n signal = np.array(signal).T\n new_signal = []\n x_array = get_x_array(current_length, new_length)\n\n for l in range(len(signal)):\n fp = signal[l]\n xp = list(range(current_length))\n new_f = np.interp(x_array, xp, fp)\n new_signal.append(new_f)\n\n signal = np.array(new_signal).T\n return signal", "def _linear_interpolation(\n prevFrame : \"np.ndarray\",\n cFrame : \"np.ndarray\",\n fID : \"int\",\n smoothingFrames : \"int\"\n ) -> \"np.ndarray\":\n prevWeight = 1-((fID+1)/smoothingFrames)\n finalWeight = (fID+1)/smoothingFrames\n transitionFrame = prevWeight * prevFrame + finalWeight*cFrame\n return transitionFrame.astype(np.uint8)", "def interpolatePeriodicSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [3 * pow(x[0],2), 2 * x[0], 1, 0]\n A[i*4+2, i*4:(i+1)*4] = [-3 * pow(x2,2), -2 * x2, -1, 0]\n A[i*4+3, 0:4] = [6 * x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [-6 * x2, -2, 0, 0]\n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def compute_interpolation_weights(out_size, in_size, scale):\n # lower, upper, lerp\n res = [[0, 0, 0] for _ in range(out_size + 1)]\n for i in range(out_size - 1, -1, -1):\n val = i * scale\n res[i][0] = int(val)\n res[i][1] = min(res[i][0] + 1, in_size - 1)\n res[i][2] = val - int(val)\n return res", "def interpolate(arrayin,shape=(256, 256)):\r\n if arrayin.dtype == 'complex' :\r\n Ln = interpolate(np.real(arrayin),shape) + 1.0J * interpolate(np.imag(arrayin),shape)\r\n #Ln = interpolate(np.abs(arrayin),new_res) * np.exp(1.0J * interpolate(np.angle(arrayin),new_res))\r\n else :\r\n coeffs = ndimage.spline_filter(arrayin)\r\n rows,cols = arrayin.shape\r\n coords = np.mgrid[0:rows-1:1j*shape[0],0:cols-1:1j*shape[1]]\r\n Ln = sp.ndimage.map_coordinates(coeffs, coords, prefilter=False)\r\n return Ln", "def interpolateSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [6*x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [6*x2, 2, 0, 0]\n \n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PowerSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] = self.spectrum[:, 1] * 
(self.spectrum[:, 0] * 1e-9 / (constants.c * constants.h))\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def interpolate_subspec(wls, fls, prev_index, gap_ind, wl_step):\n # Get the subspectrum\n sub_spec_wls = wls[prev_index:gap_ind+1]\n sub_spec_fls = fls[prev_index:gap_ind+1]\n # Interpolate onto the new grid, using linear interpolation.\n interp_f = interp1d(sub_spec_wls, sub_spec_fls, kind=\"linear\")\n\n # Calculate the number of linear wavelength steps needed.\n min_wl = min(sub_spec_wls)\n max_wl = max(sub_spec_wls)\n n_steps = math.ceil((max_wl - min_wl) / wl_step)\n # Try a couple step sizes to get as close to the ideal size as possible.\n new_wls1, step_size1 = numpy.linspace(min_wl, max_wl, n_steps,\n retstep=True)\n new_wls2, step_size2 = numpy.linspace(min_wl, max_wl, n_steps+1,\n retstep=True)\n new_wls3, step_size3 = numpy.linspace(min_wl, max_wl, n_steps-1,\n retstep=True)\n # Choose the linear step size closest to our desired step size.\n diffs = [abs(x-wl_step) for x in [step_size1, step_size2, step_size3]]\n if diffs[0] <= diffs[1] and diffs[0] <= diffs[2]:\n new_wls = new_wls1\n elif diffs[1] <= diffs[2] and diffs[1] <= diffs[0]:\n new_wls = new_wls2\n else:\n new_wls = new_wls3\n # Calculate the interpolated values and extend the spectrum with them.\n return (list(new_wls), list(interp_f(new_wls)))", "def smoothed(sequence, step=1, start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = (ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence", "def uniformize(self):\n\n self.len = len(self.x)\n\n if self.len > 1:\n # comput length of the shape:\n shape_length, scale = self.euclidian_length()\n\n # find new points:\n new_shape = Stroke()\n new_shape.x = []\n new_shape.y = []\n step = shape_length / float(self.len)\n biggest_smoller_point = 0\n new_shape.append(self.x[0], self.y[0])\n for i in 1 + np.array(range(len(self.x) - 1)):\n try:\n while i * step > scale[biggest_smoller_point]:\n biggest_smoller_point += 1\n\n biggest_smoller_point -= 1\n x0 = self.x[biggest_smoller_point]\n y0 = self.y[biggest_smoller_point]\n x1 = self.x[biggest_smoller_point + 1]\n y1 = self.y[biggest_smoller_point + 1]\n diff = float(i * step - scale[biggest_smoller_point])\n dist = float(scale[biggest_smoller_point + 1] - scale[biggest_smoller_point])\n new_x = x0 + diff * (x1 - x0) / dist\n new_y = y0 + diff * (y1 - y0) / dist\n new_shape.append(new_x, new_y)\n\n except IndexError:\n print i * step\n print biggest_smoller_point\n print scale\n # new_shape.append(self.x[-1], self.y[-1])\n\n\n self.x = new_shape.x\n self.y = new_shape.y\n self.len = new_shape.len", "def _linearize(wcsim, wcsima, wcsref, imcrpix, f, shift, hx=1.0, hy=1.0):\n x0 = imcrpix[0]\n y0 = imcrpix[1]\n p = np.asarray([[x0, y0],\n [x0 - hx, y0],\n [x0 - hx * 0.5, y0],\n [x0 + hx * 0.5, y0],\n [x0 + hx, y0],\n [x0, y0 - hy],\n [x0, y0 - hy * 0.5],\n [x0, y0 + hy * 0.5],\n [x0, y0 + hy]],\n dtype=np.float64)\n # convert image coordinates to reference image coordinates:\n p = wcsref.wcs_world2pix(\n wcsim.wcs_pix2world(p, 1), 1\n ).astype(np.longdouble)\n # apply linear fit transformation:\n p = np.dot(f, (p - shift).T).T\n # convert back to image 
coordinate system:\n p = wcsima.wcs_world2pix(\n wcsref.wcs_pix2world(p.astype(np.float64), 1), 1\n ).astype(np.longdouble)\n\n # derivative with regard to x:\n u1 = ((p[1] - p[4]) + 8 * (p[3] - p[2])) / (6 * hx)\n # derivative with regard to y:\n u2 = ((p[5] - p[8]) + 8 * (p[7] - p[6])) / (6 * hy)\n\n return (np.asarray([u1, u2]).T, p[0])", "def gd(a, step_size=0.1, steps=42):\n out = []\n ### YOUR CODE HERE\n out.append(np.array([256,1]))\n for i in range(steps):\n point = out[i]\n gradient = np.array([0.5*2*a[i],0.5*2*a[i+1]])\n npoint = point - step_size*gradient\n out.append(npoint)\n ### END CODE\n return out", "def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n 
t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def cv_compute_interpolation_weights(out_size, in_size, scale):\n # lower, upper, lerp\n res = [[0, 0, 0] for _ in range(out_size + 1)]\n res[-1] = [0, 0]\n for i in range(out_size - 1, -1, -1):\n val = (i + 0.5) * scale - 0.5\n res[i][0] = max(0, int(val))\n res[i][1] = min(res[i][0] + 1, in_size - 1)\n res[i][2] = max(0, val - int(val))\n return res", "def poly_reduce(multiplier, input_size, compressed_values):\n # Square the multiplier and fully reduce it. This does not affect\n # the result modulo 2**61 - 1, but does differ from a\n # direct evaluation modulo 2**64 - 8.\n mulsq = (multiplier ** 2) % (2 ** 61 - 1)\n acc = [0]\n\n def update(y0, y1):\n \"\"\"Double-pumped Horner update (mostly) modulo 8 * (2**61 - 1).\"\"\"\n # Perform a pair of Horner updates in (mod 2**61 - 1).\n reference = multiplier * (acc[0] + y0)\n reference = multiplier * (reference + y1)\n reference %= 2 ** 61 - 1\n\n # The real update is in (mod 2**64 - 8), with a multiplier^2\n # reduced to (mod 2**61 - 1).\n acc[0] = (mulsq * (acc[0] + y0) + multiplier * y1) % (W - 8)\n # Both values should be the same (mod 2**61 - 1).\n assert acc[0] % (2 ** 61 - 1) == reference\n\n for value in compressed_values:\n lo = value % W\n hi = value // W\n update(lo, hi)\n return acc[0]", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def interpolate(self, current_frame_number):\n # check if a detection was added in this frame -> makes no sense otherwise\n if self.get_last_frame() != current_frame_number:\n return\n\n if len(self.detection_list) < 2:\n return\n \n start_frame_number = self.detection_list[-2].frame_number\n ds = self.detection_list[-2]\n end_frame_number = self.detection_list[-1].frame_number\n de = self.detection_list[-1]\n # check if frames are missing -> if none missing break\n if start_frame_number + 1 == end_frame_number:\n return\n\n # interpolate over consecutive frames (linear)\n num_interpolate = end_frame_number - start_frame_number - 1\n\n #step size\n xs_s = (de.x1 - ds.x1) / (num_interpolate + 1)\n xe_s = (de.x2 - ds.x2) / (num_interpolate + 1) \n ys_s = (de.y1 - ds.y1) / (num_interpolate + 1)\n ye_s = (de.y2 - ds.y2) / (num_interpolate + 1) \n\n for i in range(num_interpolate):\n xi1 = int(ds.x1 + xs_s * (i + 1))\n xi2 = int(ds.x2 + xe_s * (i + 1))\n yi1 = int(ds.y1 + ys_s * (i + 1))\n yi2 = int(ds.y2 + ye_s * (i + 1))\n di = det.Detection(ds.label, xi1, yi1, xi2 ,yi2, start_frame_number + i + 1, interpolated = True)\n self.detection_list.insert(len(self.detection_list) - 1, di)", "def interpolate_to_frequency(a, freq_llimit, freq_ulimit):\n 
a_min = a.min()\n a_max = a.max()\n return np.interp(a, (a_min, a_max), (freq_llimit, freq_ulimit))", "def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')", "def interpolate_spectrum(interp,wave_ini,flux_ini,wave_fnl,flux_fnl):\n wave_ini_p = wave_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_ini_p = flux_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n wave_fnl_p = wave_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_fnl_p = flux_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n\n mask = np.zeros_like(wave_fnl).astype('int32')\n mask_p = mask.ctypes.data_as(ct.POINTER(ct.c_int))\n\n interp(wave_ini.shape[0],wave_fnl.shape[0],\n wave_ini_p,flux_ini_p,\n wave_fnl_p,flux_fnl_p,mask_p)\n\n return mask", "def fit_wavelength(identlist, npixel, xorder, yorder, maxiter, clipping,\n fit_filter=None):\n # find physical order\n k, offset = find_order(identlist, npixel)\n\n # parse the fit_filter=None\n if fit_filter is None:\n fit_filter = lambda item: True\n\n # convert indent_line_lst into fitting inputs\n fit_p_lst = [] # normalized pixel\n fit_o_lst = [] # diffraction order\n fit_w_lst = [] # order*wavelength\n fit_m_lst = [] # initial mask\n # the following list is used to find the position (aperture, no)\n # of each line\n lineid_lst = []\n for aperture, list1 in sorted(identlist.items()):\n order = k*aperture + offset\n #norm_order = 50./order\n #norm_order = order/50.\n list1['order'][:] = order\n for iline, item in enumerate(list1):\n norm_pixel = item['pixel']*2/(npixel-1) - 1\n fit_p_lst.append(norm_pixel)\n fit_o_lst.append(order)\n #fit_o_lst.append(norm_order)\n #fit_w_lst.append(item['wavelength'])\n fit_w_lst.append(item['wavelength']*order)\n fit_m_lst.append(fit_filter(item))\n lineid_lst.append((aperture, iline))\n fit_p_lst = np.array(fit_p_lst)\n fit_o_lst = np.array(fit_o_lst)\n fit_w_lst = np.array(fit_w_lst)\n fit_m_lst = np.array(fit_m_lst)\n\n mask = fit_m_lst\n\n for nite in range(maxiter):\n coeff = 
polyfit2d(fit_p_lst[mask], fit_o_lst[mask], fit_w_lst[mask],\n xorder=xorder, yorder=yorder)\n res_lst = fit_w_lst - polyval2d(fit_p_lst, fit_o_lst, coeff)\n res_lst = res_lst/fit_o_lst\n\n mean = res_lst[mask].mean(dtype=np.float64)\n std = res_lst[mask].std(dtype=np.float64)\n m1 = res_lst > mean - clipping*std\n m2 = res_lst < mean + clipping*std\n new_mask = m1*m2*mask\n if new_mask.sum() == mask.sum():\n break\n else:\n mask = new_mask\n\n # convert mask back to ident_line_lst\n for lineid, ma, res in zip(lineid_lst, mask, res_lst):\n aperture, iline = lineid\n identlist[aperture][iline]['mask'] = ma\n identlist[aperture][iline]['residual'] = res\n\n # number of lines and used lines\n nuse = mask.sum()\n ntot = fit_w_lst.size\n return coeff, std, k, offset, nuse, ntot", "def linear_interpolator(moving):\n \n if isinstance(moving, medipy.base.Image) :\n MovingImageType = medipy.itk.itk_image_type(moving)\n else :\n MovingImageType = moving\n \n return itk.LinearInterpolateImageFunction[MovingImageType, itk.D].New()", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PhotocurrentSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] *= constants.e\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)", "def spline_interp(h,yy,yy_diff2,x) :\n assert type(yy)==numpy.ndarray\n #print(__name__, type(h))\n assert type(h)!=numpy.ndarray\n \n n=yy.shape[0]\n nlo=max(int(x/h),0)\n if nlo>n-1: return(0.0)\n nhi=min(nlo+1,n-1)\n a=nhi-x/h # This is checked... 
different to Fortran version due to 0-based arrays\n b=1.0-a\n y=a*yy[nlo]+b*yy[nhi]+((a**3-a)*yy_diff2[nlo]+(b**3-b)*yy_diff2[nhi])*(h**2)/6.0\n return y", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def linear_interpolate(src_code, dst_code, step=5):\n assert (len(src_code.shape) == 2 and len(dst_code.shape) == 2 and\n src_code.shape[0] == 1 and dst_code.shape[0] == 1 and\n src_code.shape[1] == dst_code.shape[1])\n\n linspace = np.linspace(0.0, 1.0, step)[:, np.newaxis].astype(np.float32)\n return src_code + linspace * (dst_code - src_code)", "def datasetratiocopy_xl_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及左边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01:\r\n if pos_x<0: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def datasetratiocopy_extend(l,ratio,x_offset,y_offset):#全部四边上的点都延伸\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def interpolate_to_parent(self, start, end, linspace_count):\n \n v = end - start\n length = norm(v)\n v = v / length # Make v a unit vector\n l = np.linspace(0, length, linspace_count) \n\n return np.array([start[i] + v[i] * l for i in range(3)])" ]
[ "0.59042335", "0.5510684", "0.5495997", "0.543274", "0.54308456", "0.5368978", "0.53469396", "0.5295205", "0.5252446", "0.5242712", "0.51369417", "0.5123754", "0.5104273", "0.5101531", "0.5047771", "0.50370836", "0.5025036", "0.50159943", "0.49947384", "0.49876678", "0.4978305", "0.49636522", "0.49388546", "0.49343756", "0.4929394", "0.49259147", "0.492023", "0.49149087", "0.49122736", "0.49077895" ]
0.6795765
0
Gather a list of EMData on all nodes to the main node; we assume the list has the same length on each node.
def gather_EMData(data, number_of_proc, myid, main_node):
	from mpi import MPI_COMM_WORLD, MPI_INT, MPI_TAG_UB
	from mpi import mpi_send, mpi_recv
	l = len(data)
	gathered_data = []
	inc = 1  # A temp measure
	if myid == main_node:
		for i in xrange(0, number_of_proc*inc, inc):
			if i == main_node:
				for k in xrange(l):
					gathered_data.append(data[k])
			else:
				for k in xrange(l):
					im = recv_EMData(i, i*l+k)
					mem_len = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					members = mpi_recv(int(mem_len[0]), MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					members = map(int, members)
					im.set_attr('members', members)
					gathered_data.append(im)
	else:
		for k in xrange(l):
			send_EMData(data[k], main_node, myid*l+k)
			mem = data[k].get_attr('members')
			mpi_send(len(mem), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			mpi_send(mem, len(mem), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
	return gathered_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather(self, node):\n\n return []", "def _data_parallel_master(self, intermediates):\n\n # Always using same \"device order\" makes the ReduceAdd operation faster.\n # Thanks to:: Tete Xiao (http://tetexiao.com/)\n intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n\n to_reduce = [i[1][:2] for i in intermediates]\n to_reduce = [j for i in to_reduce for j in i] # flatten\n target_gpus = [i[1].sum.get_device() for i in intermediates]\n\n sum_size = sum([i[1].sum_size for i in intermediates])\n sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n\n broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n\n outputs = []\n for i, rec in enumerate(intermediates):\n outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))\n\n return outputs", "def all_gather(data):\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n if type(data) is torch.Tensor:\n data = data.cpu()\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.LongTensor([tensor.numel()]).to(\"cuda\")\n size_list = [torch.LongTensor([0]).to(\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.ByteTensor(size=(max_size,)).to(\"cuda\"))\n if local_size != max_size:\n padding = torch.ByteTensor(size=(max_size - local_size,)).to(\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data = pickle.loads(buffer)\n if type(data) is torch.Tensor:\n data = data.to(\"cuda\")\n data_list.append(data)\n\n return data_list", "def all_gather(data):\n world_size = dist.get_world_size()\n if world_size == 1:\n return [data]\n\n buffer = pickle.dumps(data) #write data into Bytes and stores in buffer\n np_buffer = np.frombuffer(buffer, dtype=np.int8)\n tensor = paddle.to_tensor(np_buffer, dtype='int32') # uint8 doese not have many ops in paddle\n\n # obtain Tensor size of each rank\n local_size = paddle.to_tensor([tensor.shape[0]])\n size_list = []\n dist.all_gather(size_list, local_size)\n max_size = max(size_list)\n\n # receiving tensors from all ranks, \n # all_gather does not support different shape, so we use padding\n tensor_list = []\n if local_size != max_size:\n padding = paddle.empty(shape=(max_size - local_size, ), dtype='int32')\n tensor = paddle.concat((tensor, padding), axis=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.astype('uint8').cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def prepare_data_for_d(self):\n\n center_nodes = []\n neighbor_nodes = []\n labels = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n # self.graph[i] = [neighbors of i]\n pos = self.graph[i]\n neg, _ = self.sample(i, self.trees[i], len(pos), for_d=True)\n # print(\"tree_i_d: \", self.trees[i])\n # print(\"neg_samples: \", 
neg)\n # print(\"neg is: \", neg)\n if len(pos) != 0 and neg is not None:\n # positive samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(pos)\n labels.extend([1] * len(pos))\n\n # negative samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(neg)\n labels.extend([0] * len(neg))\n # print(\"cen: \", center_nodes)\n return center_nodes, neighbor_nodes, labels", "def data_nodes(self):\n data_nodes = []\n for node in self.nodes:\n if 'datanode' == node.get('infos').get('type'):\n data_nodes.append(node)\n return data_nodes", "def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)", "def ndata(self):\n raise Exception(\"Graph store doesn't support access data of all nodes.\")", "def compute_nodeset(data):\n xset = NodeSet()\n for nodeset in data.split():\n xset.update(nodeset)\n return xset", "def compute_embeddings(model, opts, data):\n node_embeddings = []\n node_scores = []\n # batch size is 1 for computing embeddings\n dataloader = DataLoader(dataset=data, batch_size=1, shuffle=True, num_workers=16)\n model.eval()\n print(\"computing embeddings...\")\n with torch.no_grad():\n for batch in tqdm(dataloader):\n batch.to(opts.device)\n batch_scores, batch_embeddings = model(batch, compute_embeddings=False)\n node_embeddings.append(batch_embeddings)\n node_scores.append(batch_scores)\n # input('enter for embeddings')\n # print(node_embeddings)\n\n return torch.stack(node_scores), torch.stack(node_embeddings)", "def all_gather_list(data, group=None, max_size=16384):\n SIZE_STORAGE_BYTES = 4 # int32 to encode the payload size\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n\n if enc_size + SIZE_STORAGE_BYTES > max_size:\n raise ValueError(\n 'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))\n\n rank = get_rank()\n world_size = get_world_size()\n buffer_size = max_size * world_size\n\n if not hasattr(all_gather_list, '_buffer') or \\\n all_gather_list._buffer.numel() < buffer_size:\n all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)\n all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()\n\n buffer = all_gather_list._buffer\n buffer.zero_()\n cpu_buffer = all_gather_list._cpu_buffer\n\n assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(\n 256 ** SIZE_STORAGE_BYTES)\n\n size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')\n\n cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))\n cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))\n\n start = rank * max_size\n size = enc_size + SIZE_STORAGE_BYTES\n buffer[start: start + size].copy_(cpu_buffer[:size])\n\n all_reduce(buffer, group=group)\n\n try:\n result = []\n for i in range(world_size):\n out_buffer = buffer[i * max_size: (i + 1) * max_size]\n size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')\n if size > 0:\n result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))\n return result\n except pickle.UnpicklingError:\n raise Exception(\n 'Unable to unpickle data from other workers. all_gather_list requires all '\n 'workers to enter the function together, so this error usually indicates '\n 'that the workers have fallen out of sync somehow. 
Workers can fall out of '\n 'sync if one of them runs out of memory, or if there are other conditions '\n 'in your training script that can cause one worker to finish an epoch '\n 'while other workers are still iterating over their portions of the data.'\n )", "def all_gather_create_tensor_list(tensor: torch.Tensor, ngpus_per_node: int) -> List[torch.Tensor]:\n # tensor_list -> Output list. It should contain correctly-sized tensors to be used \n # for output of the collective.\n tensor_list = [ torch.zeros_like(tensor) for _ in range(ngpus_per_node) ]\n # Gathers tensors from the whole group in a list. \n # The variable `tensor` will not be affected by this operation.\n dist.all_gather(tensor_list=tensor_list, tensor=tensor)\n return tensor_list", "def _get_node_list(self, machine_name, max_nodes):\n\n q = Queue()\n # if machine_name.lower() == 'summit':\n # add relative node names starting with 1 for creating ERF files\n for i in range(max_nodes):\n q.put('{}'.format(i+1))\n return q", "def eventlist():\n\n infile = conf[\"run_path_derived\"] + 'LOCALIZED.txt'\n\n data = np.genfromtxt(infile, skip_header=1) \n\n mlt = cx.MAGtoMLT(data[:, 5], data[:, 0:5])\n\n # Swap mlat and mlon colums so in expected order (lat then long)\n data[:, [6,5]] = data[:, [5,6]]\n \n data = np.hstack((data, np.reshape(mlt, (mlt.shape[0], 1))))\n \n return data", "def build_network(self):\n\n\n logits_list = []\n for dn in self.find_datanodes():\n\n if len(dn.receives_from) == 0: continue\n\n logits = 0\n for rf in dn.receives_from:\n logits += rf.get_tensors(rf.connect_backwards())[0]\n\n logits_list.append(logits)\n\n return logits_list", "def flatten_data(data):\r\n return list(gen_flatten_data(data))", "def _build_from_chunks(self, data_node):\n result = ''\n\n if not data_node:\n return ''\n\n master_data = data_node[0]\n result = \"{}{}\".format(result, self._decode(master_data['value']))\n # if data is not in chunks, then return the first node's value\n if 'tags' not in master_data or 'chunks' not in master_data['tags']:\n return result\n\n # join the values in chunks\n last_chunk = int(master_data['tags']['chunks'])\n for chunk_id in range(1, last_chunk):\n slave_data = data_node[chunk_id]\n result = \"{}{}\".format(result, self._decode(slave_data['value']))\n return result", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def create_start_data(self):\n\t\tdef inputMesh(feature_size):\n\t\t\tc1= np.expand_dims(np.array([0,-0.9]),0)\n\t\t\tc2= np.expand_dims(np.array([-0.9,0.9]),0)\n\t\t\tc3= np.expand_dims(np.array([0.9,0.9]),0)\n\t\t\tx1 = np.expand_dims(np.pad(np.array([0,-0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx2 = np.expand_dims(np.pad(np.array([-0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx3 = np.expand_dims(np.pad(np.array([0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tedge_index = np.transpose(np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) # COO format\n\t\t\treturn np.concatenate((c1,c2,c3),axis=0), 
np.concatenate((x1,x2,x3),axis=0),edge_index\n\n\t\tc, x, edge_index = inputMesh(self.params.feature_size)# x is c with zeros appended, x=f ..pixel2mesh\n\t\tdata_list_x = []\n\t\tdata_list_c = []\n\t\tdata_list_pid = []\n\t\tfor i in range(self.params.batch_size):\n\t\t\tdata_list_x.append(Data(x=torch.Tensor(x).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_c.append(Data(x=torch.Tensor(c).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_pid.append(Data(x=torch.zeros(c.shape[0],1).type(dtypeL).requires_grad_(False)))\n\t\tbatch_x = Batch.from_data_list(data_list_x)\n\t\tbatch_c = Batch.from_data_list(data_list_c)\n\t\tbatch_pid = Batch.from_data_list(data_list_pid)\n\t\treturn batch_x, batch_c, batch_pid", "def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def items(self):\n items = []\n current = self.head\n while current != None:\n items.append(current.data)\n current = current.next\n return items", "def graph_data(\n edge_list_path,\n node_features_path,\n protein_ids_path,\n protein_id_col_node=\"Gene\",\n protein_id_col_prot=\"ensembl.gene\",\n sparse_tensor=True,\n cut=0,\n):\n a = pd.read_csv(edge_list_path).values\n edge_attr = a[:, 2:] / 1000.0\n\n # cut the edges\n cut_mask = edge_attr[:, -1] > cut\n edge_ind = torch.tensor(a[:, :2][cut_mask], dtype=torch.long)\n edge_attr = torch.tensor(edge_attr[cut_mask], dtype=torch.float32)\n\n # force undirected\n if not is_undirected(edge_ind):\n edge_ind = torch.cat([edge_ind, edge_ind[:, [1, 0]]], 0)\n edge_attr = torch.cat([edge_attr, edge_attr], 0)\n\n # features\n protein_ids = pd.read_csv(protein_ids_path, sep=\"\\t\")[\n [\"id\", protein_id_col_prot]\n ]\n x = pd.read_csv(node_features_path, sep=\"\\t\")\n feature_columns = x.drop(protein_id_col_node, 1).columns\n x = pd.merge(\n protein_ids,\n x,\n how=\"left\",\n left_on=protein_id_col_prot,\n right_on=protein_id_col_node,\n ).sort_values(\"id\")[feature_columns]\n x.fillna(x.mean(), inplace=True)\n x = torch.tensor(((x - x.mean()) / x.std()).values, dtype=torch.float32)\n data = Data(x, edge_ind.T, edge_attr, id=torch.arange(x.shape[0]))\n\n if sparse_tensor:\n 
tsp = ToSparseTensor(False)\n data = tsp(data)\n\n return data", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def query(self, data):\n to_return = []\n for element in data.data:\n element = torch.unsqueeze(element, 0)\n if len(self.data) < self.max_size:\n self.data.append(element)\n to_return.append(element)\n else:\n if random.uniform(0,1) > 0.5:\n i = random.randint(0, self.max_size-1)\n to_return.append(self.data[i].clone())\n self.data[i] = element\n else:\n to_return.append(element)\n return torch.cat(to_return)", "def _collect_data(self, current_generation):\n neighbours = self._Individuals()\n while True:\n\n if self._check_collected_data(neighbours):\n break\n if not self._data_consuming_queue.is_ready(current_generation):\n continue\n data = self._data_consuming_queue.consume_message(current_generation)\n if current_generation != data.generation:\n continue\n\n self._parse_received_data(neighbours, int(data.source), data.data)\n\n logger.info(\"RECEIVED data\" + str(data.data) + str(data.source))\n\n return neighbours", "def display_content(self):\n list = []\n traverse = self.head\n\n if self.head == None:\n return\n\n while traverse.next != None:\n list.append(traverse.data)\n traverse = traverse.next\n\n list.append(traverse.data)\n return list", "def offset_list(self):\n self.nodes.append(None)\n self.formulas.append(None)\n self.node_memory.append(None)\n self.formulas_memory.append(None)", "def merge_data(self, nodenet_data, keep_uids=False):\n\n uidmap = {}\n # for dict_engine compatibility\n uidmap[\"Root\"] = \"s1\"\n\n # re-use the root nodespace\n uidmap[\"s1\"] = \"s1\"\n\n # merge in spaces, make sure that parent nodespaces exist before children are initialized\n nodespaces_to_merge = set(nodenet_data.get('nodespaces', {}).keys())\n for nodespace in nodespaces_to_merge:\n self.merge_nodespace_data(nodespace, nodenet_data['nodespaces'], uidmap, keep_uids)\n\n # merge in 
nodes\n for uid in nodenet_data.get('nodes', {}):\n data = nodenet_data['nodes'][uid]\n parent_uid = data['parent_nodespace']\n if not keep_uids:\n parent_uid = uidmap[data['parent_nodespace']]\n if data['type'] in self.__nodetypes or data['type'] in self.native_modules:\n olduid = None\n if keep_uids:\n olduid = uid\n new_uid = self.create_node(\n data['type'],\n parent_uid,\n data['position'],\n name=data['name'],\n uid=olduid,\n parameters=data['parameters'],\n gate_parameters=data['gate_parameters'],\n gate_functions=data['gate_functions'])\n uidmap[uid] = new_uid\n node_proxy = self.get_node(new_uid)\n for gatetype in data['gate_activations']: # todo: implement sheaves\n node_proxy.get_gate(gatetype).activation = data['gate_activations'][gatetype]['default']['activation']\n\n else:\n warnings.warn(\"Invalid nodetype %s for node %s\" % (data['type'], uid))\n\n # merge in links\n for linkid in nodenet_data.get('links', {}):\n data = nodenet_data['links'][linkid]\n self.create_link(\n uidmap[data['source_node_uid']],\n data['source_gate_name'],\n uidmap[data['target_node_uid']],\n data['target_slot_name'],\n data['weight']\n )\n\n for monitorid in nodenet_data.get('monitors', {}):\n data = nodenet_data['monitors'][monitorid]\n if 'node_uid' in data:\n old_node_uid = data['node_uid']\n if old_node_uid in uidmap:\n data['node_uid'] = uidmap[old_node_uid]\n if 'classname' in data:\n if hasattr(monitor, data['classname']):\n getattr(monitor, data['classname'])(self, **data)\n else:\n self.logger.warn('unknown classname for monitor: %s (uid:%s) ' % (data['classname'], monitorid))\n else:\n # Compatibility mode\n monitor.NodeMonitor(self, name=data['node_name'], **data)", "def get_data_block(self, index, next_index):\n next_index = tf.minimum(next_index, self.Nt)\n indices = tf.range(index, next_index)\n data = [flatten_batch_dims(tf.gather(d, indices, axis=0),num_batch_dims=-self.event_size) for d in self.data]\n return data" ]
[ "0.6460555", "0.5887683", "0.56105876", "0.56016797", "0.5551757", "0.5506774", "0.5503824", "0.5480824", "0.54083604", "0.5394268", "0.5391553", "0.53643715", "0.5339283", "0.5328205", "0.5325319", "0.5300631", "0.52970463", "0.52842844", "0.52776074", "0.526741", "0.52617437", "0.52275884", "0.5188136", "0.51826525", "0.5129071", "0.5117738", "0.51122135", "0.51080143", "0.51075697", "0.51014996" ]
0.67186177
0
write headers from the images in data into a disk file called filename. The filename has to be either hdf or bdb. lima is a list with positions in the disk file into which headers will be written, i.e., the header from data[k] will be written into image number lima[k]
def write_headers(filename, data, lima): from utilities import file_type from EMAN2db import db_open_dict ftp = file_type(filename) if ftp == "bdb": # For unknown reasons this does not work on Linux, but works on Mac ??? Really? DB = db_open_dict(filename) for i in range(len(lima)): DB.set_header(lima[i], data[i]) DB.close() #for i in range(len(lima)): # data[i].write_image(filename, lima[i]) elif ftp == "hdf": for i in range(len(lima)): data[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True) else: ERROR("Unacceptable file format","write_headers",1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)", "def _save_to_file(filename, data, start=0, header_size=None):\n if header_size is None:\n header_size = 0\n item_dtype = data.dtype\n # Open file as necessary\n opened = False\n if isinstance(filename, str):\n fd = open(filename, 'rb+')\n opened = True\n else:\n fd = filename\n # Seek to halo location and write\n offset = header_size + (start * item_dtype.itemsize)\n fd.seek(offset, os.SEEK_SET)\n data.tofile(fd)\n if opened:\n fd.close()", "def write_data(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n fh.write(str(header))\r\n fh.write(str(data) + \"\\n\")", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def write(self, filename, data, hdr):\n pass", "def write_data_2(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n if len(header) <= 0 or len(data) <= 0:\r\n return\r\n else:\r\n fh.write(str(header + \"\\n\"))\r\n fh.write(str(data) + \"\\n\")\r\n fh.write(\"\\n\")", "def writeheader(filename, header):\n # convert string to [unsigned] byte array\n hh = np.zeros(512, dtype='uint8')\n for i, ss in enumerate(header):\n hh[i] = ord(ss)\n # write header to file\n file_arr = np.memmap(filename, dtype='uint8', mode='r+', shape=(512,))\n file_arr[:512] = hh[:]\n del file_arr\n return", "def write(filename, data, extname=None, extver=None, header=None,\n clobber=False, ignore_empty=False, units=None, table_type='binary',\n names=None, write_bitcols=False, compress=None, tile_dims=None,\n **keys):\n if keys:\n import warnings\n warnings.warn(\n \"The keyword arguments '%s' are being ignored! 
This warning \"\n \"will be an error in a future version of `fitsio`!\" % keys,\n DeprecationWarning, stacklevel=2)\n\n kwargs = {\n 'clobber': clobber,\n 'ignore_empty': ignore_empty\n }\n with FITS(filename, 'rw', **kwargs) as fits:\n fits.write(data,\n table_type=table_type,\n units=units,\n extname=extname,\n extver=extver,\n compress=compress,\n header=header,\n names=names,\n write_bitcols=write_bitcols,\n tile_dims=tile_dims)", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def tabser(filename, body, data):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n buffer = bytearray([0] * (2 ** 16))\n head.pack_into(buffer, 0, 0, int(time()), len(data), body.size, 0),\n offset = head.size\n for row in data:\n body.pack_into(buffer, offset, *row, 0)\n offset += body.size\n else:\n print(\"write %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # foot.pack_into(buffer, offset, bytes([0, 0, 0, 0]))\n with open(filename, \"wb\") as f:\n f.write(buffer)", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except 
IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def write_file(self,filename):\n \n with open(filename, 'w') as f:\n tab_width = np.max([len(k) for k in self.header.keys()])\n for k,v in self.header.items():\n f.write(u'{0}:\\t{1}\\n'.format(k, v).encode('utf8').expandtabs(tab_width+2))\n np.savetxt(f, self.data, fmt ='%f %f %f %d')", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.txt\", \"w\")\n file.write(str(\"\\t\".join(hdata)) + \"\\n\")", "def write_header(indir, nb_landmark, nb_feature, mirror_factor, order_factor, feature_names=None):\n assert nb_landmark > 0\n assert os.path.exists(indir) and os.path.isdir(indir), indir + \" not found.\"\n if indir[-1] != os.sep:\n indir += os.sep\n axis = [\"x\", \"y\", \"z\"]\n header = \"ID\"\n for numb in range(1, nb_landmark + 1):\n for axe in axis:\n header += \",\" + axe + str(numb)\n if feature_names is not None:\n assert len(feature_names) == nb_feature\n header += \",\" + \",\".join(feature_names)\n else:\n for numb in range(1, nb_feature + 1):\n header += \",Feature\" + str(numb)\n header += \"\\n\"\n with open(indir + \"../landmarks.csv\", \"w\") as filep:\n filep.write(header)\n modif = \"\"\n if mirror_factor is not None:\n modif += \"_reversed\"\n if order_factor is not None:\n modif += \"_reordered\"\n if mirror_factor is not None or order_factor is not None:\n with open(indir + \"../landmarks\" + modif + \".csv\", \"w\") as filep:\n filep.write(header)", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def write_data_to_file(data, filename):\n with open(filename, 'wb') as outfile:\n outfile.write(data)", "def save_header_default(filename, nhalos_per_tree):\n ntrees = len(nhalos_per_tree)\n nhalos = np.sum(nhalos_per_tree)\n dtype1 = np.dtype([('ntrees', 'i4'), ('totnhalos', 'i4')])\n x1 = np.array([(ntrees, nhalos)], dtype=dtype1)\n x2 = 
nhalos_per_tree.astype('i4')\n header_size = x1.nbytes + x2.nbytes\n # Open\n if isinstance(filename, str):\n fd = open(filename, 'wb')\n close = True\n else:\n fd = filename\n close = False\n # Write\n x1.tofile(fd)\n x2.tofile(fd)\n # Close\n if close:\n fd.close()\n return header_size", "def write_data_to_file(data, filename):\n\tif isinstance(data, pd.DataFrame):\n\t\tdata_to_print = data.values.tolist()\n\telse:\n\t\tdata_to_print = data\n\n\twith open(filename, 'w') as f:\n\t\tfor item in data_to_print:\n\t\t\tf.write(\"%s\\n\" % item)", "def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))", "def _reportDataFile(self, dataFileName, outputFile):\n #subsequent access to the file should be open for \"append\"-ing\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\" ' +AutoGrader.Const.HEADER_COLOR2 + '\"><br>\\n------------- ' + os.path.split(dataFileName)[1] + ' -------------</font>\\n')\n f.close()", "def gp_file(data,filename,output_dir='',order = [],head = False):\n f = open(output_dir + filename + '.csv', 'w')\n f.write(str(len(order)-1) + '\\n')\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n f.closed\n\n return None", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def write_file(data, filename):\n file = open(filename, \"wb\")\n file.write(data)\n file.close()", "def save_gyre(filename, header, data):\n with open(filename, 'wt') as f:\n header_length = len(list(header[()]))\n # if header_length == 4:\n # fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n # elif header_length == 5:\n # fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n # else:\n # raise ValueError(\"header should have 4 or 5 components but \"\n # \"it appears to have %i\" % header_length)\n if not 'version' in header.dtype.names:\n fmt = ''.join(['%6i','%26.16E'*3,'\\n'])\n else:\n fmt = ''.join(['%6i','%26.16E'*3,'%6i\\n'])\n\n f.writelines([fmt % tuple(header[()])])\n\n N = len(data[0])-1\n fmt = ''.join(['%6i',' %26.16E'*N,'\\n'])\n for row in data:\n f.writelines([fmt % tuple(row)])", "def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()", "def add_headers(headers, out):\r\n out.write(common.to_csv_line(headers, \"efficient\"))", "def write_header(self, fd):\n fd.write(f\"BEGIN {self.name}\")\n if len(self.data_items) > 0:\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n one_based = (\n self.data_items[0].structure.type == DatumType.integer\n )\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n if len(self.data_items) > 1:\n for data_item in self.data_items[1:]:\n entry = data_item.get_file_entry(values_only=True)\n fd.write(\"%s\" % (entry.rstrip()))\n if self.get_comment().text:\n fd.write(\" \")\n self.get_comment().write(fd)\n fd.write(\"\\n\")", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n 
t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()" ]
[ "0.81443864", "0.65365857", "0.6521095", "0.642603", "0.6422746", "0.6345552", "0.63141286", "0.622469", "0.6156558", "0.6150772", "0.6104141", "0.60294604", "0.5993755", "0.5916143", "0.58797914", "0.58024967", "0.57776046", "0.57631105", "0.5731549", "0.57286596", "0.57176596", "0.57096976", "0.56874543", "0.56874293", "0.56824505", "0.5671921", "0.5641608", "0.5618318", "0.56097484", "0.5597597" ]
0.8396805
0
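For context, a minimal usage sketch of the write_headers function defined above, to make the intended call pattern concrete. It assumes the SPARX/EMAN2 environment (where file_type, db_open_dict and EMUtil live) is importable and that an HDF stack already exists; the file name stack.hdf, the index list and the attribute set below are illustrative assumptions only.

from EMAN2 import EMData

# Illustrative only: load three images from an existing HDF stack,
# modify a header attribute, then push just the updated headers back
# to the same slots in the file.
positions = [0, 2, 5]                               # assumed image indices in the stack
images = [EMData("stack.hdf", k) for k in positions]
for im in images:
    im.set_attr("active", 1)                        # any header attribute works here

write_headers("stack.hdf", images, positions)       # header of images[k] goes to image positions[k]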
write the header from a single image data into a disk file called filename. The filename has to be either hdf or bdb. lima is the position in the disk file into which the header will be written, i.e., the header from data will be written into image number lima
def write_header(filename, data, lima): from utilities import file_type from EMAN2db import db_open_dict ftp = file_type(filename) if ftp == "bdb": DB = db_open_dict(filename) DB.set_header(lima, data) elif ftp == "hdf": data.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True) else: ERROR("Unacceptable file format","write_headers",1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):\n\t\t\tDB.set_header(lima[i], data[i])\n\t\tDB.close()\n\t\t#for i in range(len(lima)):\n\t\t#\tdata[i].write_image(filename, lima[i])\n\telif ftp == \"hdf\":\n\t\tfor i in range(len(lima)):\n\t\t\tdata[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def writeheader(filename, header):\n # convert string to [unsigned] byte array\n hh = np.zeros(512, dtype='uint8')\n for i, ss in enumerate(header):\n hh[i] = ord(ss)\n # write header to file\n file_arr = np.memmap(filename, dtype='uint8', mode='r+', shape=(512,))\n file_arr[:512] = hh[:]\n del file_arr\n return", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def write(self, filename, data, hdr):\n pass", "def write_data(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n fh.write(str(header))\r\n fh.write(str(data) + \"\\n\")", "def write_header(self, fd):\n fd.write(f\"BEGIN {self.name}\")\n if len(self.data_items) > 0:\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n one_based = (\n self.data_items[0].structure.type == DatumType.integer\n )\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n if len(self.data_items) > 1:\n for data_item in self.data_items[1:]:\n entry = data_item.get_file_entry(values_only=True)\n fd.write(\"%s\" % (entry.rstrip()))\n if self.get_comment().text:\n fd.write(\" \")\n self.get_comment().write(fd)\n fd.write(\"\\n\")", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def write_data_2(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n if len(header) <= 0 or len(data) <= 0:\r\n return\r\n else:\r\n fh.write(str(header + \"\\n\"))\r\n fh.write(str(data) + \"\\n\")\r\n fh.write(\"\\n\")", "def _save_to_file(filename, data, start=0, header_size=None):\n if header_size is None:\n header_size = 0\n item_dtype = data.dtype\n # Open file as necessary\n opened = 
False\n if isinstance(filename, str):\n fd = open(filename, 'rb+')\n opened = True\n else:\n fd = filename\n # Seek to halo location and write\n offset = header_size + (start * item_dtype.itemsize)\n fd.seek(offset, os.SEEK_SET)\n data.tofile(fd)\n if opened:\n fd.close()", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def write_file(self,filename):\n \n with open(filename, 'w') as f:\n tab_width = np.max([len(k) for k in self.header.keys()])\n for k,v in self.header.items():\n f.write(u'{0}:\\t{1}\\n'.format(k, v).encode('utf8').expandtabs(tab_width+2))\n np.savetxt(f, self.data, fmt ='%f %f %f %d')", "def write_headerfile(self, header_file, header):\n f = open(header_file, 'w')\n for iii in range(len(header)):\n outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\\n'\n f.write(outline)\n f.close()", "def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)", "def _write_header(self, out_handle):\n out_handle.write(\"##gff-version 3\\n\")", "def write_sff_header(header, fh, num=None):\r\n\r\n lines = [\"Common Header:\"]\r\n if (num is not None):\r\n header[\"# of Flows\"] = num\r\n\r\n lines.extend([\" %s:\\t%s\" % (param, header[param])\r\n for param in header])\r\n fh.write(\"\\n\".join(lines) + \"\\n\\n\")", "def write_header(fpath, header):\n\n with open(fpath, 'r+') as f:\n content = f.read()\n # check if there is a shebang and encoding\n if content[:45] == '#!/usr/bin/env python\\n# -*- coding: utf-8 -*-':\n f.seek(46, 0)\n f.write('\\n' + header + content[46:])\n # check if there is only a shebang\n elif content[:21] == '#!/usr/bin/env python':\n f.seek(22, 0)\n f.write('\\n' + header + content[22:])\n # no shebang or encoding\n else:\n f.seek(0, 0)\n f.write(header + content)", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 
'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)", "def write_header(outfbfile, header_params, header):\n for hp in header_params:\n hdrval = sigproc.addto_hdr(hp, header[hp])\n outfbfile.write(hdrval)", "def print_header(fitsfile, ext=0, ofileh=sys.stdout):\n\n hdr = fitsio.read_header(fitsfile, ext=ext)\n ofileh.write(f\"{hdr}\")\n ofileh.write(\"\\n\")", "def save_header_default(filename, nhalos_per_tree):\n ntrees = len(nhalos_per_tree)\n nhalos = np.sum(nhalos_per_tree)\n dtype1 = np.dtype([('ntrees', 'i4'), ('totnhalos', 'i4')])\n x1 = np.array([(ntrees, nhalos)], dtype=dtype1)\n x2 = nhalos_per_tree.astype('i4')\n header_size = x1.nbytes + x2.nbytes\n # Open\n if isinstance(filename, str):\n fd = open(filename, 'wb')\n close = True\n else:\n fd = filename\n close = False\n # Write\n x1.tofile(fd)\n x2.tofile(fd)\n # Close\n if close:\n fd.close()\n return header_size", "def write_uef_header(file, major, minor):\n\n\t# Write the UEF file header\n\tfile.write('UEF File!\\000')\n\n\t# Minor and major version numbers\n\tfile.write(number(1, minor) + number(1, major))", "def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))", "def 
add_header(header, filename, i):\n with open(filename, 'r+') as f:\n content = f.readlines()\n content[0] = header\n f.seek(0,0)\n f.write(f'<!-- Generated with XMLGenerator.py {__ver__} | {get_app_name(i)} -->\\n')\n f.writelines(content)", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)", "def WriteHeaderFileForCcmModel(filename, model): \n\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(ccm_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(ccm_model_name + \".hpp written!\\n\")", "def writeHeader(self,header):\n pass", "def tabser(filename, body, data):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n buffer = bytearray([0] * (2 ** 16))\n head.pack_into(buffer, 0, 0, int(time()), len(data), body.size, 0),\n offset = head.size\n for row in data:\n body.pack_into(buffer, offset, *row, 0)\n offset += body.size\n else:\n print(\"write %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # foot.pack_into(buffer, offset, bytes([0, 0, 0, 0]))\n with open(filename, \"wb\") as f:\n f.write(buffer)" ]
[ "0.7826132", "0.70860624", "0.6762291", "0.6738056", "0.67234135", "0.67085034", "0.66346943", "0.6610399", "0.6607992", "0.6557405", "0.6481333", "0.6448737", "0.64438635", "0.6427536", "0.64213383", "0.6419421", "0.6363067", "0.63381046", "0.631573", "0.63024944", "0.62987816", "0.62825197", "0.6276485", "0.6192612", "0.61547595", "0.6150514", "0.6146963", "0.6146572", "0.61051464", "0.6102049" ]
0.8375335
0
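Similarly, a small sketch of the single-image write_header variant defined above, again assuming the SPARX/EMAN2 environment is available; the bdb-style path bdb:outdir#stack, the image index and the attribute are assumed for illustration.

from EMAN2 import EMData

# Illustrative only: update one image header inside a bdb stack in place.
img = EMData("bdb:outdir#stack", 7)                 # assumed stack and image index
img.set_attr("origin_x", 0.0)                       # change some header attribute
write_header("bdb:outdir#stack", img, 7)            # header of img written to image number 7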
retrieve 3D alignment parameters from the header: phi, theta, psi, tx, ty, tz, mirror, scale
def get_params3D(ima, xform = "xform.align3d"): t = ima.get_attr(xform) d = t.get_params("spider") return d["phi"],d["theta"],d["psi"],d["tx"],d["ty"],d["tz"],d["mirror"],d["scale"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def fk3(joint_rotations):\n h0_3 = htm0_3(joint_rotations)\n x0_3 = h0_3[0, 3]\n y0_3 = h0_3[1, 3]\n z0_3 = h0_3[2, 3]\n d0_3 = [x0_3, y0_3, z0_3]\n return d0_3", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 
'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def extract3d(xaxis, yaxis, zaxis, dat3d, crd_sys, xvec,yvec, zvec, pad=0.):\n func = RegularGridInterpolator((xaxis, yaxis, zaxis), dat3d, \n method='linear', bounds_error=False, fill_value=pad)\n\n # convert x,y,z coordinates to spherical coordinates\n if crd_sys == 'car':\n profx = xvec\n profy = yvec\n profz = zvec\n elif crd_sys == 'sph':\n # radius\n profx = np.sqrt(xvec**2 + yvec**2 + zvec**2)\n\n # theta\n tvec = np.arctan2(zvec, np.sqrt(xvec**2 + yvec**2))\n reg = tvec < 0.\n tvec[reg] = tvec[reg] + 2.*np.pi\n profy = tvec\n\n # azimuth\n pvec = np.arctan2(yvec, xvec)\n reg = pvec < 0\n pvec[reg] = 
pvec[reg] + 2*np.pi\n profz = pvec\n\n nvec = len(xvec)\n prof = np.zeros([nvec], dtype=np.float64)\n for ii in range(nvec):\n prof[ii] = func([profx[ii], profy[ii], profz[ii]])\n\n return prof", "def _get_quaternion_data(self, msg):\n alpha, beta, gamma = PIDController.get_euler_angle_from_quat(msg.quaternion.w, msg.quaternion.x,\n msg.quaternion.y, msg.quaternion.z)\n self._actual_euler[\"alpha\"], self._actual_euler[\"beta\"], self._actual_euler[\"gamma\"] \\\n = alpha, beta, gamma", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def parameters(self):\n # encoded in θ\n return self.theta.columns", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def getOptimizableVariables(self, TiltAlignmentParameters_):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n # translation\n if self.optimizeMarkerPositions:\n nopti += (ntilt) * 2\n\n # variable magnifications for projections\n if TiltAlignmentParameters_.dmag:\n nopti = nopti + ntilt - 1\n\n #check that irefmark and ireftilt are set properly\n if not (TiltAlignmentParameters_.irefmark in range(nmark)):\n TiltAlignmentParameters_.irefmark = 0\n print(\"Warning: irefmark must be 1<= irefmark <=nmark\")\n print(\"New irefmark: \" + str(TiltAlignmentParameters_.irefmark))\n\n if not (TiltAlignmentParameters_.ireftilt in self._projIndices.astype(int)):\n TiltAlignmentParameters_.ireftilt = abs(self._tiltAngles).argmin()\n print(\"Warning: ireftilt must be in range of projection indices\")\n print(\"New ireftilt: \" + str(TiltAlignmentParameters_.ireftilt))\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti = nopti + ntilt\n else:\n nopti = nopti + 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti = nopti + 1\n\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n\n # nopti += ntilt\n\n optimizableVariables = numpy.zeros((nopti), dtype='float')\n\n # marker 3D coords\n\n ivar = 0\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = Marker.get_r()\n optimizableVariables[ivar] = r[0]\n optimizableVariables[ivar + 1] = r[1]\n optimizableVariables[ivar + 2] = r[2]\n ivar = ivar + 3\n\n # translations\n if self.optimizeMarkerPositions:\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #if self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt:\n optimizableVariables[ivar] = self._alignmentTransX[itilt]\n optimizableVariables[ivar + 1] = self._alignmentTransY[itilt]\n ivar = ivar + 2\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if int(self._projIndices[itilt]) != TiltAlignmentParameters_.ireftilt:\n optimizableVariables[ivar] = self._alignmentMagnifications[itilt]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n optimizableVariables[ivar] = self._alignmentRotations[itilt]\n ivar = ivar + 1\n\n # all rotations are the same - take the first one\n else:\n 
optimizableVariables[ivar] = self._alignmentRotations[0]\n ivar = ivar + 1\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n optimizableVariables[ivar] = self._alignmentBeamTilt\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n # for i in range(ntilt):\n # optimizableVariables[ivar] = -1\n # ivar += 1\n\n return optimizableVariables", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def lattice_parameters(self):\n return self.a, self.b, self.c, self.alpha, self.beta, self.gamma", "def get_pars(self):\n return [self.z, self.b, self.logN]" ]
[ "0.6369768", "0.63076174", "0.57975394", "0.5729281", "0.5533436", "0.551851", "0.5500369", "0.5439547", "0.5422399", "0.54033196", "0.5388639", "0.5361121", "0.53491545", "0.53419423", "0.531873", "0.53108954", "0.53095436", "0.53085315", "0.5303664", "0.5303664", "0.5302264", "0.5296609", "0.52865446", "0.52786803", "0.52607256", "0.5229555", "0.5214137", "0.52101487", "0.51915", "0.5185534" ]
0.7157893
0
set 3D alignment parameters in the header phi theta psi tx ty tz mirror scale
def set_params3D(ima, p, xform = "xform.align3d"): t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2],"tx":p[3],"ty":p[4],"tz":p[5],"mirror":p[6],"scale":p[7]}) ima.set_attr(xform, t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def setCameraRotation3D(ang):\n dislin.vup3d(ang)", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def set_homog_trans_mtx(x: float, y: float, z: float, mtx: numpy.ndarray):\n mtx[0][3] = x\n mtx[1][3] = y\n mtx[2][3] = z", "def setOptimizableVariables(self, TiltAlignmentParameters_, 
optimizableVariables):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n if self.optimizeMarkerPositions:\n # translation\n nopti += (ntilt) * 2\n\n # variable magnifications for projections, exclude scaling of reference image (S==1)\n if TiltAlignmentParameters_.dmag:\n nopti += ntilt - 1\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti += ntilt\n else:\n nopti += 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti += 1\n\n # nopti += ntilt\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n # check that number of variables is ok\n if len(optimizableVariables) != nopti:\n print(\"Length optimizableVariables: \" + str(len(optimizableVariables)))\n print(\"N optmization: \" + str(nopti))\n raise IndexError('length of optimizableVariables does not match TiltAlignmentParameters')\n\n # marker 3D coords\n ivar = 0\n\n\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = numpy.array([optimizableVariables[ivar],\n optimizableVariables[ivar + 1], optimizableVariables[ivar + 2]])\n self._Markers[imark].set_r(r)\n\n ivar = ivar + 3\n\n\n if self.optimizeMarkerPositions:\n # translations\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #FFif (self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt):\n self._alignmentTransX[itilt] = optimizableVariables[ivar]\n self._alignmentTransY[itilt] = optimizableVariables[ivar + 1]\n ivar = ivar + 2\n\n\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of reference projection is 1.\n if (int(self._projIndices[itilt]) != int(self._projIndices[self.ireftilt])):\n self._alignmentMagnifications[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n self._alignmentRotations[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n # all rotations are the same - take the first one\n else:\n self._alignmentRotations[0] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n self._alignmentBeamTilt = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n\n if not self.optimizeMarkerPositions:\n from pytom.scripts.Rotation_function import calculate_translation\n\n\n # r_model is the modelled x,y,z coordinate of the reference marker\n r_model = self._Markers[self.irefmark].get_r()\n\n # if using a reduced set using an indices existing in the reduced set\n # i = int(numpy.argwhere(self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0])\n psi_ref = numpy.deg2rad(numpy.mean(self._alignmentRotations) + 90)\n\n for iproj in range(0,ntilt):\n # setting variables\n marker = self._Markers[self.irefmark]\n r_exp_tilt = numpy.array([marker.get_xProj(iproj), marker.get_yProj(iproj)]) - numpy.array(\n self.TiltSeries_._TiltAlignmentParas.cent)\n psi_itilt = numpy.deg2rad(self._alignmentRotations[iproj] + 90)\n theta_itilt = numpy.deg2rad(self._tiltAngles[iproj])\n magnification =self._alignmentMagnifications[iproj]\n\n # calculating translation 
setting difference model and experimental reference marker point at 0\n tx, ty = calculate_translation(r_model, r_exp_tilt, psi_ref, psi_itilt, theta_itilt, magnification)\n\n\n self._alignmentTransX[iproj] = tx\n self._alignmentTransY[iproj] = ty\n\n\n\n # print(self.irefmark, self._alignmentTransX[self.ireftilt], self._alignmentTransY[self.ireftilt])\n # for itilt in range(ntilt):\n # self.q[itilt] = optimizableVariables[ivar]\n # ivar += 1", "def setViewAngle3D(ang):\n dislin.vang3d(ang)", "def set_phi(self):\n self.phi = float(dihedral(self.O5.getXYZ(), self.C1.getXYZ(), self.GO.getXYZ(), self.CX.getXYZ()))", "def __init__(self, nav_data, div=5, dx=0.0, dy=1.6757135024103853, dth=0.0, pc1=1/3, pc2=9/24, ph=0.2): \n self.div = div\n self.t = np.arange(self.div)/self.div\n self.L = nav_data.loc['Loa']\n self.H = nav_data.loc['B']\n self.T = nav_data.loc['T']\n self.dx = dx \n self.dy = dy\n self.dth = dth\n self.c1 = self.L*pc1\n self.c2 = self.L*pc2\n self.h2 = self.H*ph\n self.Rz = rotation.matrix([0,0,1],dth) \n\n self.LWT = 0.50*nav_data.loc['Delta_m'] \n self.Md = nav_data.loc['Delta_m'] #self.LWT + self.DWT\n self.D = nav_data.loc['De']\n self.G = self.T - self.D \n self.z = nav_data.loc['KG'] - self.D", "def set_trans(self, head_mri_trans):\n x, y, z = -self.mri_origin[0]\n mri_tgt_trans = translation(x, y, z)\n head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)\n\n x, y, z = self.hsp.nasion[0]\n src_hsp_trans = translation(x, y, z)\n src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)\n\n rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])\n x, y, z = src_tgt_trans[:3, 3]\n\n self.rot_x = rot_x\n self.rot_y = rot_y\n self.rot_z = rot_z\n self.trans_x = x\n self.trans_y = y\n self.trans_z = z", "def set_MRI_orientation(self):\n\n if self.has_axes(MRI3Daxes):\n orientation = MRI3Daxes[:]\n if self.has_axis('time'):\n orientation += ['time']\n if self.has_axis('iteration'):\n orientation += ['iteration']\n if self.has_axis('condition'):\n orientation += ['condition']\n\n orientation += sorted(set(self.axes_names).difference(orientation))\n\n self.set_orientation(orientation)", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n 
nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z", "def setAxisLengths3D(x=2.,y=2.,z=2.):\n dislin.axis3d(x,y,z)", "def __init__(self, orientation = None, translation = None, panelgroup = None, homogenous_transformation = None, name = None):\n self.include_translation = True\n self.name = name\n\n if panelgroup is not None:\n d_mat = panelgroup.get_local_d_matrix()\n fast = matrix.col((d_mat[0],d_mat[3],d_mat[6])).normalize()\n slow = matrix.col((d_mat[1],d_mat[4],d_mat[7])).normalize()\n orig = matrix.col((d_mat[2],d_mat[5],d_mat[8]))\n\n v3 = fast.cross(slow).normalize()\n\n r3 = matrix.sqr((fast[0],slow[0],v3[0],\n fast[1],slow[1],v3[1],\n fast[2],slow[2],v3[2]))\n\n self.orientation = r3.r3_rotation_matrix_as_unit_quaternion()\n self.translation = orig\n\n if not self.name:\n self.name = panelgroup.get_name()\n\n elif orientation is not None or translation is not None:\n assert orientation is not None and translation is not None\n self.orientation = orientation\n self.translation = translation\n\n else:\n # Decompose the homegenous transformation assuming no scale factors were used\n h = homogenous_transformation\n self.orientation = matrix.sqr((h[0],h[1],h[2],\n h[4],h[5],h[6],\n h[8],h[9],h[10])).r3_rotation_matrix_as_unit_quaternion()\n self.translation = matrix.col((h[3],\n h[7],\n h[11]))\n assert h[12] == h[13] == h[14] == 0 and h[15] == 1", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.]\r\n self.b = [1,self.lengthscale**2/3]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def set_phi(self,phi):\n\t\tr=self.r\n\t\tself.x = np.cos(np.deg2rad(phi))*r\n\t\tself.y = np.sin(np.deg2rad(phi))*r", "def __init__(self):\n Page.__init__(self, u\"Esfera, parametrización por proyecciones estereográficas\")\n\n r = .998\n esf = ParametricPlot3D(lambda t, f: (r * sin(t) * cos(f), r * sin(t) * sin(f), r * cos(t)), (0, pi, 70), (0, 2 * pi, 70))\n# esf.setAmbientColor(_1(99,136,63))\n esf.setDiffuseColor(_1(99, 136, 63))\n esf.setSpecularColor(_1(99, 136, 63))\n\n\n def proyZm1(u, v, t1):\n \"\"\"proy desde el polo norte al plano z=-1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)\n\n def proyZ1(u, v, t2):\n \"\"\"proy desde el polo sur al plano z=1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)\n\n stereo = ParametricPlot3D(proyZm1, (-3, 3, 70), (-3, 3, 70))\n stereo.setLinesVisible(True)\n stereo.setMeshVisible(False)\n stereo.setMeshDiffuseColor(_1(117, 55, 79))\n\n stereo2 = ParametricPlot3D(proyZ1, (-3, 3, 70), (-3, 3, 70))\n stereo2.setLinesVisible(True)\n stereo2.setMeshVisible(False)\n stereo2.setMeshDiffuseColor(_1(80, 87, 193))\n stereo2.setTransparency(0.5)\n stereo2.setTransparencyType(8)\n\n\n baseplane = BasePlane()\n baseplane.setHeight(-1.005)\n baseplane.setRange((-4, 4, 7))\n self.addChild(esf)\n self.addChild(stereo2)\n self.addChild(stereo)\n self.addChild(baseplane)\n\n params = [stereo,stereo2]\n\n ## no queremos los controles\n for i,p in enumerate(params):\n p.parameters['t%d' % (i+1)].hide()\n\n anims = [p.parameters['t%d' % (i+1)].asAnimation() for i,p in enumerate(params)]\n self.setupAnimations(anims)", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def create_e3d_file(self,path='./'):\n dt=0.606*self.model_parameters['dh']/np.max(self.velocity_model['vp']) # dt needs to satify the courant condition\n t=int(self.model_parameters['duration']/dt)\n \n # Check path exists, if not create one\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Create e3d parameter file\n f=open('%s%s_e3dmodel.txt'%(path,self.model_name),'w')\n f.write(\"grid x=%s z=%s dh=%s b=2 q=1\\ntime dt=%0.5f t=%s\\n\"%(self.model_parameters['xmax'],self.model_parameters['zmax'],self.model_parameters['dh'],dt,t))\n f.write(\"block p=%s s=%s r=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][0],self.velocity_model['vs'][0],self.velocity_model['rho'][0]))\n \n for i in range(1,len(self.velocity_model['vp'])-1):\n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 
Qf=50\\n\"%(self.velocity_model['vp'][i],self.velocity_model['vs'][i],self.velocity_model['rho'][i],\n self.velocity_model['depth'][i],self.velocity_model['depth'][i+1]))\n \n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\\n\"%(self.velocity_model['vp'][i+1],self.velocity_model['vs'][i+1],self.velocity_model['rho'][i+1],\n self.velocity_model['depth'][i+1],self.model_parameters['zmax'])) # extend to the based of the model \n \n f.write(\"visual movie=5\\n\\n\")\n\n if self.source['src_type']!=4:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'])) \n else:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s Mxx=%s Myy=%s Mzz=%s Mxy=%s Mxz=%s Myz=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'],self.source['mt'][0],self.source['mt'][1],self.source['mt'][2],self.source['mt'][3],self.source['mt'][4],self.source['mt'][5])) \n\n for r in range(len(self.receivers['recxs'])):\n f.write('sac x=%0.3f z=%0.3f file=%s\\n'%(self.receivers['recxs'][r],self.receivers['reczs'][r],self.model_name))\n\n f.write(\"visual sample=0.1 movie=1 scale=10000000000/n\")\n f.close()\n \n print('File created: %s%s_e3dmodel.txt'%(path,self.model_name))", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)" ]
[ "0.59650105", "0.593235", "0.5929709", "0.58860916", "0.5739548", "0.5691223", "0.56045157", "0.5573526", "0.55701107", "0.5569729", "0.55564326", "0.55041516", "0.5470599", "0.5299311", "0.5288693", "0.5281004", "0.5277822", "0.52361757", "0.5231874", "0.5215359", "0.5197185", "0.5192886", "0.518786", "0.5173227", "0.5172853", "0.5144875", "0.5120197", "0.5104728", "0.51026654", "0.5094818" ]
0.65861595
0
retrieve projection alignment parameters from the header phi theta psi s2x s2y
def get_params_proj(ima, xform = "xform.projection"): t = ima.get_attr(xform) d = t.get_params("spider") return d["phi"],d["theta"],d["psi"],-d["tx"],-d["ty"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def get_acquisition_pars(theta=None, phi=None, shift=None, nx=None, ny=None, cfg=None):\n # ss_rect_map = {(13, 13): 1E7, (13, 14): 1E7, (13, 15): 1E7, (13, 16): 1E7, (13, 17): 1E7,\n # (14, 13): 1E7, (14, 14): 1E5, (14, 15): 1E5, (14, 16): 1E5, (14, 17): 1E7,\n # (15, 13): 1E7, (15, 14): 1E5, (15, 15): 5E4, (15, 16): 1E5, (15, 17): 1E7,\n # (16, 13): 1E7, (16, 14): 1E5, (16, 15): 1E5, (16, 16): 1E5, (16, 17): 1E7,\n # (17, 13): 1E7, (17, 14): 1E7, (17, 15): 1E7, (17, 16): 1E7, (17, 17): 1E7}\n nmeans_dict = {(15, 15): 1,\n(16, 15): 1,\n(16, 16): 1,\n(15, 16): 1,\n(14, 16): 1,\n(14, 15): 1,\n(14, 14): 1,\n(15, 14): 1,\n(16, 14): 1,\n(17, 14): 2,\n(17, 15): 1,\n(17, 16): 1,\n(17, 17): 2,\n(16, 17): 1,\n(15, 17): 1,\n(14, 17): 1,\n(13, 17): 1,\n(13, 16): 2,\n(13, 15): 1,\n(13, 14): 1,\n(13, 13): 5,\n(14, 13): 2,\n(15, 13): 1,\n(16, 13): 5,\n(17, 13): 5,\n(18, 13): 5,\n(18, 14): 5,\n(18, 15): 5,\n(18, 16): 5,\n(18, 17): 5,\n(18, 18): 5,\n(17, 18): 5,\n(16, 18): 2,\n(15, 18): 1,\n(14, 18): 1,\n(13, 18): 5,\n(12, 18): 5,\n(12, 17): 5,\n(12, 16): 5,\n(12, 15): 5,\n(12, 14): 5,\n(12, 13): 5,\n(12, 12): 5,\n(13, 12): 5,\n(14, 12): 5,\n(15, 12): 5,\n(16, 12): 5,\n(17, 12): 5,\n(18, 12): 5,\n(19, 12): 5,\n(19, 13): 5,\n(19, 14): 5,\n(19, 15): 5,\n(19, 16): 5,\n(19, 17): 5,\n(19, 18): 5,\n(19, 19): 5,\n(18, 19): 5,\n(17, 19): 5,\n(16, 19): 5,\n(15, 19): 5,\n(14, 19): 5,\n(13, 19): 5,\n(12, 19): 5,\n(11, 19): 5,\n(11, 18): 5,\n(11, 17): 5,\n(11, 16): 5,\n(11, 15): 5,\n(11, 14): 5,\n(11, 13): 5,\n(11, 12): 5,\n(11, 11): 5,\n(12, 11): 5,\n(13, 11): 5,\n(14, 11): 5,\n(15, 11): 5,\n(16, 11): 5,\n(17, 11): 5,\n(18, 11): 5,\n(19, 11): 5}\n\n\n\n # led_center = 15\n # led_disp = (int(cfg.array_size)+1)//2\n # led_range = range(led_center-led_disp, led_center+led_disp)\n # ledmap = product(led_range, led_range)\n #\n # ss_dict = {}\n # for led in ledmap:\n # # if led == [15, 15]:\n # # ss_dict[(led[0], led[1])] = 60E4\n # # else:\n # dist = (np.abs(led[0]-15)**2+np.abs(led[1]-15))\n # ss = 5.E5*(1+.5*dist)\n # ss_dict[(led[0], led[1])] = ss\n # if ss >3E6:\n # ss_dict[(led[0], led[1])] = 3E6\n\n power = 255\n # Camera parameters\n if nx is not None:\n # if nx == 14 or nx == 15 or nx ==16 or ny == 15 or ny ==16 or ny == 14:\n # shutter_speed = 50000\n # else:\n # shutter_speed = 500000\n # nmeans = nmeans_dict[nx, ny]\n # if [nx, ny] in [[15, 15], [15, 16], [14, 17], [14,16], [14, 15],\n # [14, 14], [13,16], [13, 15]]:\n # shutter_speed = 100000\n # nmeans = 1\n # else:\n # shutter_speed = 600000\n # nmeans = 1\n\n try:\n # shutter_speed = ss_dict[nx, ny]\n shutter_speed = 50000\n nmeans = nmeans_dict[nx, ny]\n except:\n shutter_speed = 1E5\n nmeans = 1\n return float(cfg.iso), shutter_speed, power, nmeans\n\n shutter_speed_min = cfg.shutter_speed[0]\n shutter_speed_max = cfg.shutter_speed[0]\n if phi == None:\n if shift == None:\n raise Exception(\"Must assign a value either for phi or shift.\")\n shutter_speed = translate(phi, 0, cfg.shift_max,\n shutter_speed_min, shutter_speed_max)\n else:\n shutter_speed = translate(phi, 0, 90,\n shutter_speed_min, shutter_speed_max)\n # Led parameters\n led_power = cfg.max_led_power\n return cfg.iso, shutter_speed, led_power, nmeans", "def stereographic_projection(phi_degree, 
psi_degree):\n psi_rad = psi_degree *np.pi/180\n psi_stereo = 2*np.tan(psi_rad/2)\n\n phi_rad = phi_degree *np.pi/180\n return phi_rad, psi_stereo", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def get_preamble_z(self):\n a = PhysicalLayer.get_preamble()\n return 2,np.array([z for z in a['symb'][0:31] for _ in range(self._sps)])", "def parameters(self):\n # encoded in θ\n return self.theta.columns", "def get_projection_params(fname):\n # Read strings from the region file\n with open(fname, 'r') as f:\n lines = f.readlines()\n wcs = lines[2][:-1] # Coordinate system TODO bad variable name\n if lines[0] != '# Region file format: DS9 version 4.1\\n':\n print 'Warning: potentially invalid region file!'\n print 'First line was: ' + lines[0]\n if wcs != 'fk5':\n raise Exception('Regions must be in sky (fk5) coordinates; got ' +\n wcs + 'instead')\n\n # Manipulate each string and save to list\n lines = filter(lambda x: '# projection' in x, lines[3:])\n projspecs = []\n for ln in lines:\n lnsplit = ln[2:-1].split(')') # Remove leading '#', trailing '\\n'\n if lnsplit[1] == '':\n r = lnsplit[0] + ')' # No optional arguments\n else:\n r = lnsplit[0] + ') #' + lnsplit[1] # Add octothorpe\n projspecs.append('%s; %s' % (wcs, r))\n\n return projspecs", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def get_phase_space(self, grid_flag):\n\n f = h5py.File(self.xs_path, 'r')\n self.N = f['paramdescrip']['NVALUE'].value # det maximum range Ni for each d_i\n phase_space = {}\n order = {}\n NPAR = f['paramdescrip']['NPAR'].value[0]\n for di in range(NPAR - 1):\n di_name = f['paramdescrip']['PARNAM'].value[di] # get names for dimensions. Starts at 0\n # get values for dimensions. Starts at 1. e.g. 'BURNUP': array([ 0., 9.35253143, 18.70503998,..\n # Is saved as a np.array, of floats64 FORTRAN-contiguous\n phase_space[di_name] = np.array([float(val) for val in f['paramvaleurs'][\n 'pval %d' % (di + 1)].value], order='F')\n order[di] = di_name # e.g. 
'7': 'BURNUP'\n\n iso_aux = []\n # just concatenate those two\n for iso in f['contenu']['NOMISO'].value[:]:\n iso_aux.append(iso)\n for iso in f['contenu']['NOMMAC'].value[:]:\n iso_aux.append(iso)\n f.close()\n self.iso_A2 = iso_aux\n\n # USER IMPOSED: Non-independant variables set to [0].\n \"\"\"\n *Do not eliminate them, this will bring problems with the cartesin product later one\n *if instead of '[phase_space['PHASE'][0]]' (which is equal to 1) just '[1]' is written then np.where() does not recognize the value.\n\n This two problems rise from the decision of defining the 'space of interest' as a subset from the 'phase space' which in time is read directly from the H5F file. Later several comparisons are made between the two. The upside is the need for no explicit declaration of the phase-space thus minimizing chances of un-noticed error in domain assignation.\n \"\"\"\n if 'PHASE' in phase_space.keys():\n phase_space['PHASE'] = [phase_space['PHASE'][0]]\n if 'BURNUPstep' in phase_space.keys():\n phase_space['BURNUPstep'] = [phase_space['BURNUPstep'][0]]\n\n if grid_flag == 'SG': # major update required\n \"\"\"\n In contras to FG, the stored values in the concatenated SAPHYB file only considers different burnup steps, i.e a set of values [0, 500, 500, 100] are stored as [0, 500, 100]. Two posibilities remain, read the BURNUP value from the single XS files separatly or load a pickeled object with the phase space. The second option was implemented.\n \"\"\"\n with open(self.file_path + self.xs_folder + 'phase_space.pickle', 'rb') as handle:\n phase_space_pk = pickle.load(handle)\n phase_space_pk.pop('a')\n phase_space_pk.pop('d')\n phase_space_pk.pop('l')\n phase_space_pk.pop('BURNUP_evol')\n phase_space_pk.pop('BURNUP_steps')\n phase_space = phase_space_pk\n\n self.phase_space, self.order, self.d, self.NPAR = phase_space, order, len(order), NPAR", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def get_probeLocs_calib_setup_cm(dir, 
num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*2.54, -4.25*2.54, 4.24*2.54, 4.24*2.54]\n y_pos = [-4.25*2.54, 4.24*2.54, 4.24*2.54, -4.25*2.54]\n z_pos = [-2.25*2.54, -0.75*2.54, 0.75*2.54, 2.25*2.54]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def _sims_header(self, hdr):\n # Called DefAnalysisBis and DefEps in OpenMIMS\n d = {}\n d['simsheader version'], d['original filename'], d['matrix'], \\\n d['sigref auto'], d['sigref points'], d['sigref delta'], \\\n d['sigref scan time'], d['sigref measure time'], \\\n d['sigref beam on time'], d['eps centering enabled'], \\\n d['eps enabled'], d['eps central energy'], d['eps b field'] = \\\n unpack(self._bo + 'i 256s 256s 10i', hdr.read(556))\n\n d['EPSCentralSpecies'] = self._species(hdr)\n d['EPSReferenceSpecies'] = self._species(hdr)\n\n # Don't know how long method name is, runs into null-padded zone.\n d['eps ref mass tube hv'], d['eps ref mass tube hv max var'], \\\n d['sample rotation'], d['sample rotation speed'], \\\n d['sample rotation synced'], d['sample name'], \\\n d['user name'], d['method name'] = \\\n unpack(self._bo + '2d 3i 80s 32s 256s', hdr.read(396))\n\n d['original filename'] = self._cleanup_string(d['original filename'])\n d['matrix'] = self._cleanup_string(d['matrix'])\n d['sample name'] = self._cleanup_string(d['sample name'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['method name'] = self._cleanup_string(d['method name'])\n\n d['sigref auto'] = bool(d['sigref auto'])\n d['eps centering enabled'] = bool(d['eps centering enabled'])\n d['eps enabled'] = bool(d['eps enabled'])\n d['sample rotation'] = bool(d['sample rotation'])\n d['sample rotation synced'] = bool(d['sample rotation synced'])\n d['sigref scan time'] /= 10 # 0.1 sec increments\n return d", "def calculateSipWcsHeader(wcs, order, bbox, spacing, header=None):\n transform = getPixelToIntermediateWorldCoords(wcs)\n crpix = wcs.getPixelOrigin()\n cdMatrix = wcs.getCdMatrix()\n crval = wcs.getSkyOrigin()\n gridNum = Extent2I(int(bbox.getWidth()/spacing + 0.5), int(bbox.getHeight()/spacing + 0.5))\n\n sip = SipApproximation(transform, crpix, cdMatrix, Box2D(bbox), gridNum, order)\n\n md = makeTanSipMetadata(sip.getPixelOrigin(), crval, sip.getCdMatrix(), sip.getA(), sip.getB(),\n sip.getAP(), sip.getBP())\n\n if header is not None:\n header.combine(md)\n else:\n header = md\n\n return header", "def orbitproject(x,y,inc,phi=0,psi=0):\n\n x2 = x*np.cos(phi) + y*np.sin(phi)\n y2 = -x*np.sin(phi) + y*np.cos(phi)\n z2 = y2*np.sin(inc)\n y2 = y2*np.cos(inc)\n\n xf = x2*np.cos(psi) - y2*np.sin(psi)\n yf = x2*np.sin(psi) + y2*np.cos(psi)\n\n 
return (xf,yf,z2)", "def _nanosims_header(self, hdr):\n # Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter\n d = {}\n d['PeakCenter'] = {}\n d['nanosimsheader version'], d['regulation mode'], d['mode'], \\\n d['grain mode'], d['semigraphic mode'], d['stage delta x'], \\\n d['stage delta y'], d['working frame width'], \\\n d['working frame height'], d['scanning frame x'], \\\n d['scanning frame width'], d['scanning frame y'], \\\n d['scanning frame height'], d['counting frame x start'], \\\n d['counting frame x end'], d['counting frame y start'], \\\n d['counting frame y end'], d['detector type'], d['electron scan'], \\\n d['scanning mode'], d['beam blanking'], \\\n d['PeakCenter']['peakcenter enabled'], d['PeakCenter']['start'], \\\n d['PeakCenter']['frequency'], d['b fields'] = \\\n unpack(self._bo + '25i', hdr.read(100))\n\n d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])\n d['regulation mode'] = bool(d['regulation mode'])\n d['grain mode'] = bool(d['grain mode'])\n d['semigraphic mode'] = bool(d['semigraphic mode'])\n d['scanning mode'] = bool(d['scanning mode'])\n\n # Set a few extra variables.\n d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1\n d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1\n\n # Found in at least one version (file v11, nsHeader v8) a repeat of\n # Poly_list and this first part of nanoSIMSHeader. Total of repeat\n # adds up to 288. After last Poly_list, 288 byte padding zone, not all\n # null-bytes.\n hdr.seek(288, 1)\n\n # Is this the nPrintRed from OpenMIMS?\n d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])\n\n d['SibCenterHor'] = self._sib_center(hdr)\n d['SibCenterVert'] = self._sib_center(hdr)\n\n # Duplicate and store these two in sub dicts\n b_field_index, has_sib_center = \\\n unpack(self._bo + '2i', hdr.read(8))\n if b_field_index < 0:\n b_field_index = None\n has_sib_center = bool(has_sib_center)\n\n d['SibCenterHor']['b field index'] = b_field_index\n d['SibCenterVert']['b field index'] = b_field_index\n d['SibCenterHor']['sib center enabled'] = has_sib_center\n d['SibCenterVert']['sib center enabled'] = has_sib_center\n\n d['EnergyCenter'] = self._energy_center(hdr)\n d['E0SCenter'] = self._e0s_center(hdr)\n\n d['EnergyCenter']['wait time'], d['presputtering raster'], \\\n d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \\\n d['baseline measurement'], d['baseline offset'], \\\n d['baseline frequency'] = \\\n unpack(self._bo + '5i d i', hdr.read(32))\n return d", "def sph2car(r, theta, phi):\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n\n return x, y, z", "def read_projection(fname, element, theta_index):\n\n projections = dxchange.read_hdf5(fname, \"MAPS/XRF_roi\")\n theta = float(dxchange.read_hdf5(fname, \"MAPS/extra_pvs_as_csv\")[theta_index].split(b',')[1])\n elements = read_channel_names(fname)\n\n try:\n if find_index(elements, element) != None:\n return projections[find_index(elements, element),:, :], theta\n else:\n raise TypeError\n except TypeError:\n print(\"**** ERROR: Element %s does exist in the file: %s \" % (element, fname))\n return None", "def sat_2d_pos(theta):\n r_sat = a * (1 - e**2) / (1 + e * np.cos(theta))\n return r_sat, theta", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 
'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = -projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)", "def parse_annotations(Hinv, obsmat_txt):\n\n def to_image_frame(loc):\n \"\"\"\n Given H^-1 and (x, y, z) in world coordinates,\n returns (u, v, 1) in image frame coordinates.\n \"\"\"\n loc = np.dot(Hinv, loc) # to camera frame\n return loc / loc[2] # to pixels (from millimeters)\n\n mat = np.loadtxt(obsmat_txt)\n num_peds = int(np.max(mat[:, 1])) + 1\n peds = [np.array([]).reshape(0, 4) for _ in range(num_peds)] # maps ped ID -> (t,x,y,z) path\n\n num_frames = (mat[-1, 0] + 1).astype(\"int\")\n num_unique_frames = np.unique(mat[:, 0]).size\n recorded_frames = [-1] * num_unique_frames # maps timestep -> (first) frame\n peds_in_frame = [[] for _ in range(num_unique_frames)] # maps timestep -> ped IDs\n\n frame = 0\n time = -1\n blqk = False\n for row in mat:\n if row[0] != frame:\n frame = int(row[0])\n time += 1\n recorded_frames[time] = frame\n\n ped = int(row[1])\n\n peds_in_frame[time].append(ped)\n loc = np.array([row[2], row[4], 1])\n loc = to_image_frame(loc)\n loc = [time, loc[0], loc[1], loc[2]]\n peds[ped] = np.vstack((peds[ped], loc))\n\n return recorded_frames, peds_in_frame, peds", "def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix", "def eclipse_parameters(sat, earth, sun, time):\n\n position = earth + sat\n barycentric_e = position.at(time).observe(earth)\n barycentric_s = position.at(time).observe(sun)\n _, _, distance_to_earth = barycentric_e.radec()\n _, _, 
distance_to_sun = barycentric_s.radec()\n theta_e = semidiameter(earthlib.earth_radius_au, distance_to_earth.au)\n theta_s = semidiameter(0.00465, distance_to_sun.au) # Sun's average radius in AU = 0.00465\n theta = barycentric_e.separation_from(barycentric_s).radians\n return theta, theta_e, theta_s", "def getProjections(self): \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]" ]
[ "0.61034876", "0.60535675", "0.6002323", "0.55047", "0.53491384", "0.53456676", "0.5342712", "0.5333783", "0.5266437", "0.52358764", "0.52249706", "0.5206061", "0.5192297", "0.51854604", "0.5182782", "0.51370823", "0.51352656", "0.512739", "0.5105829", "0.50973487", "0.5085923", "0.5063176", "0.5059661", "0.5044287", "0.50372756", "0.5009923", "0.4982236", "0.49807766", "0.4974874", "0.49621952" ]
0.6081615
1
set projection alignment parameters in the header phi theta psi s2x s2y
def set_params_proj(ima, p, xform = "xform.projection"): from EMAN2 import Vec2f t = Transform({"type":"spider","phi":p[0],"theta":p[1],"psi":p[2]}) t.set_trans(Vec2f(-p[3], -p[4])) ima.set_attr(xform, t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def set_phi(self,phi):\n\t\tr=self.r\n\t\tself.x = np.cos(np.deg2rad(phi))*r\n\t\tself.y = np.sin(np.deg2rad(phi))*r", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def map_sim_positions(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, ax1 = plt.subplots(figsize=(10,10))\n # p.gal_index = np.where(GR.file_name == 'z0.00_G7169_cG29270')[0][0]\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n # print('TEST!',gal_ob.file_name,p.gal_index)\n simdata = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n\n # Plot\n print(simdata.head())\n ax1.plot(simdata.x,simdata.y,'o',ms=2,mew=2)\n\n print(gal_ob.radius)\n # Limit axes limits a bit to avoid area with no particles...\n # ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])make_projec\n # ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')", "def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is 
approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def make_wcsheader(ra=40.07293, dec=-1.6137748, size=2, pixscale=0.1, get_hdu=False, theta=0):\n \n if np.isscalar(pixscale):\n cdelt = [pixscale/3600.]*2\n else:\n cdelt = [pixscale[0]/3600., pixscale[1]/3600.]\n \n if np.isscalar(size):\n npix = np.cast[int]([size/pixscale, size/pixscale])\n else:\n npix = np.cast[int]([size[0]/pixscale, size[1]/pixscale])\n \n hout = pyfits.Header()\n hout['CRPIX1'] = npix[0]/2\n hout['CRPIX2'] = npix[1]/2\n hout['CRVAL1'] = ra\n hout['CRVAL2'] = dec\n hout['CD1_1'] = -cdelt[0]\n hout['CD1_2'] = hout['CD2_1'] = 0.\n hout['CD2_2'] = cdelt[1]\n hout['NAXIS1'] = npix[0]\n hout['NAXIS2'] = npix[1]\n hout['CTYPE1'] = 'RA---TAN'\n hout['CTYPE2'] = 'DEC--TAN'\n \n wcs_out = pywcs.WCS(hout)\n \n theta_rad = np.deg2rad(theta)\n mat = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n\n rot_cd = np.dot(mat, wcs_out.wcs.cd)\n \n for i in [0,1]:\n for j in [0,1]:\n hout['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n wcs_out.wcs.cd[i,j] = rot_cd[i,j]\n \n cd = wcs_out.wcs.cd\n wcs_out.pscale = get_wcs_pscale(wcs_out) #np.sqrt((cd[0,:]**2).sum())*3600.\n \n if get_hdu:\n hdu = pyfits.ImageHDU(header=hout, data=np.zeros((npix[1], npix[0]), dtype=np.float32))\n return hdu\n else:\n return hout, wcs_out", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def setInstrumentParameters(self, instrpars):\n\n # A reference to the primary header is also required.\n\n for p in self.assoc.parlist:\n p['image'].setInstrumentParameters (instrpars, p['exposure'].header)", "def __init__(self, params, print_df=True, print_help=False):\n stellar_type, position, parallax, proper_motion, v_radial = params\n self.init_params = params\n self.stellar_type = stellar_type\n self.proper_motion = proper_motion # [mas/year, mas/year]\n self.distance = 1/parallax # parsecs\n self.parallax = parallax # arcsecs\n self.position = position # [hms, dms]\n self.v_radial = v_radial # km/s\n\n self.galactic_coords = radec_to_galactic(self.position) # 
degrees\n\n # Proper motion, described in Cartesian components\n self.pm_dec = self.proper_motion[1]\n # We don't need to scale by cos(dec) because the units are already in mas/year\n self.pm_ra = self.proper_motion[0] #* np.cos(self.pm_dec)\n\n # Proper motion, described in angular components\n self.pm_mag = np.sqrt(self.pm_ra**2 + self.pm_dec**2) # mas/year\n # PA = angle east of north\n self.pm_posang = round(np.arctan(self.pm_ra/self.pm_dec), 4) # radians\n\n self.v_transverse = 4.74 * self.pm_mag * self.distance # km/s\n\n # Space velocity is the third leg of the v_trans/v_rad triangle.\n self.v_space = np.sqrt(self.v_transverse**2 + self.v_radial**2)\n\n star_obj = SkyCoord(Angle(position[0]), Angle(position[1]), frame='icrs')\n self.constellation = get_constellation(star_obj)\n\n self.d_from_GC = self.distance_to_galactic_center() # parsecs\n self.closer = True if self.d_from_GC > d_sun_GC else False\n\n d = [{'Name': 'Stellar Type', 'Value': self.stellar_type, 'units': 'N/A'},\n {'Name': 'Distance', 'Value': self.distance, 'units': 'parsec'},\n {'Name': 'Parallax', 'Value': self.parallax, 'units': 'arcsecs'},\n {'Name': 'Position', 'Value': self.position, 'units': '[hms, dms]'},\n {'Name': 'Galactic Coordinates', 'Value': self.galactic_coords,\n 'units': 'degrees'},\n {'Name': 'Proper Motion (RA)', 'Value': self.pm_ra, 'units': 'mas/year'},\n {'Name': 'Proper Motion (Dec)', 'Value': self.pm_dec, 'units': 'mas/year'},\n {'Name': 'Proper Motion Magnitude', 'Value': self.pm_mag, 'units': 'mas/year'},\n {'Name': 'Proper Motion Position Angle', 'Value': self.pm_posang,\n 'units': 'radians'},\n {'Name': 'Radial Velocity', 'Value': self.v_radial, 'units': 'km/s'},\n {'Name': 'Transverse Velocity', 'Value': self.v_transverse, 'units': 'km/s'},\n {'Name': 'Space Velocity', 'Value': self.v_space, 'units': 'km/s'},\n {'Name': 'Host Constellation', 'Value': self.constellation, 'units': 'N/A'},\n {'Name': 'Distance from Galactic Center', 'Value': self.d_from_GC,\n 'units': 'parsecs'},\n {'Name': 'Closer than Sun to GC?', 'Value': self.closer, 'units': 'N/A'}\n ]\n\n self.full_param_df = pd.DataFrame(d)\n\n if print_help:\n print getdoc(self), '\\n\\n'\n\n if print_df:\n print self.full_param_df", "def combine_par(output_dir): \n #start time\n start_time = time.time()\n \n # set input/output file paths\n infile0 = output_dir + 'TransformParameters.0.txt'\n infile1 = output_dir + 'TransformParameters.1.txt'\n outfile0 = output_dir +'TransformParameters.fwd.txt'\n outfile1 = output_dir +'TransformParameters.inv.txt'\n \n # define reference frame for registration\n ref = 0\n spacing = 1\n \n # Open parameter file 0 and search for GridSpacing and GridOrigin line\n text_filein0 = open( infile0, \"r\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n origin_str = line\n elif line.find( \"(GridSpacing \" ) == 0:\n spacing_str = line\n text_filein0.close()\n \n # Extract time point origin from line\n origin_split = origin_str.strip().split(' ')\n origin_split = origin_split[ len( origin_split ) - 1 ].split(')')\n old_origin = float( origin_split[ 0 ] )\n \n # Extract time point spacing from line\n spacing_split = spacing_str.strip().split(' ')\n spacing_split = spacing_split[ len( spacing_split ) - 1 ].split(')')\n old_spacing = float( spacing_split[ 0 ] )\n \n \n print(\"Original grid origin in time dimension: \" + str( old_origin ))\n print(\"Original grid spacing in time dimension: \" + str( old_spacing ))\n print(\"\")\n \n # Determine new grid origin\n new_origin = ref - ( 
spacing / old_spacing ) * ( ref - old_origin )\n print( \"New grid origin in time dimension: \" + str( new_origin ))\n \n # Recompose origin and spacing lines\n new_origin_string = origin_str.strip().split(' ')\n new_origin_string.pop()\n new_origin_string = \" \".join( new_origin_string ) + \" \" + str( new_origin ) + \")\\n\"\n new_spacing_string = spacing_str.strip().split(' ')\n new_spacing_string.pop()\n new_spacing_string = \" \".join( new_spacing_string ) + \" \" + str( spacing ) + \")\\n\"\n \n # Reopen text file, replace origin and spacing and write to output file 0\n text_filein0 = open( infile0, \"r\" )\n text_fileout0 = open( outfile0, \"w\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n # Write new origin line\n text_fileout0.write( new_origin_string )\n elif line.find( \"(GridSpacing \" ) == 0:\n # Write new spacing line\n text_fileout0.write( new_spacing_string )\n elif line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Remove initial transform\n text_fileout0.write( \"(InitialTransformParametersFileName \\\"NoInitialTransform\\\")\\n\" )\n else:\n # Write line read from input file (no change)\n text_fileout0.write( line )\n text_filein0.close()\n text_fileout0.close()\n \n # Open parameter file 1 and search for GridSize\n text_filein1 = open( infile1, \"r\" )\n for line in text_filein1:\n if line.find(\"(GridSize\") == 0:\n grid_str = line\n grid_split = grid_str.strip().split(' ')\n grid_split[-1] = grid_split[-1].replace(')','')\n grid_split = grid_split[1:]\n grid_float = [float(s) for s in grid_split]\n grid_all = int(grid_float[0] * grid_float[1] * grid_float[2] * grid_float[3])\n num_phase = int(grid_float[3])\n text_filein1.close()\n \n # Replace initial transform parameter filename\n text_filein1 = open( infile1, \"r\" )\n text_fileout1 = open( outfile1, \"w\" )\n for line in text_filein1:\n if line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Set initial transform filename\n text_fileout1.write( \"(InitialTransformParametersFileName \\\"\" + outfile0 + \"\\\")\\n\" )\n elif line.find(\"(TransformParameters \") == 0:\n # extract b-spline parameters, arrangment : x (Px*Py*Pz), y(Px*Py*Pz), z(Px*Py*Pz), t(Px*Py*Pz)\n transPar_str = line\n transPar_split = transPar_str.strip().split(' ')\n transPar_split[-1] = transPar_split[-1].replace(')','')\n transPar_split = transPar_split[1:]\n num_grid3d = int(grid_all / num_phase) \n str_seg = transPar_split[(ref*num_grid3d):((ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all+(ref*num_grid3d)): (grid_all + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*2+(ref*num_grid3d)): (grid_all*2 + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*3+(ref*num_grid3d)): (grid_all*3 + (ref+1)*num_grid3d)] * num_phase\n #str_seg = \"\"\n #str_seg = [str_seg + transPar_split[((ref*num_grid3d)+grid*i):((ref+1)*num_grid3d+grid*i)] * num_phase for i in range(4)]\n str_joined = ' '.join(str_seg)\n text_fileout1.write(\"(TransformParameters \" + str_joined + \")\\n\")\n else:\n # Write line read from input file (no change)\n text_fileout1.write( line )\n text_filein1.close()\n text_fileout1.close()\n \n # caclulate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n print('combine_par done. 
elapsed time:', elapsed_time, 's')", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise", "def _set_psf_layout_psfex(self):\n\n print('setting psf layout for PSFEx')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n total_psf_pixels = 0\n\n #psf_npix = psf_size*psf_size\n\n psf_start_row = 0\n for iobj in range(obj_data.size):\n for icut in range(obj_data['ncutout'][iobj]):\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = psf_data[file_id]\n\n pim = p.get_rec(row,col)\n cen = p.get_center(row,col)\n\n psf_shape = pim.shape\n psf_npix = pim.size\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels", "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def set_TranslationsInTiltSeries(self, TiltSeries_):\n for (kk, Proj) in enumerate(TiltSeries_._ProjectionList):\n Proj._alignmentTransX = self._alignmentTransX[kk]\n Proj._alignmentTransY = self._alignmentTransY[kk]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def set_Y_homog_rot_mtx(angle_rads: float, mtx: numpy.ndarray):\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n mtx[0][0] = mtx[2][2] = cosang\n mtx[0][2] = sinang\n mtx[2][0] = -sinang", "def configure_galaxy(self, galaxy_index, orbital_radius, orbital_particles, theta=0., phi=0., m=None):\n orbital_particles = np.int_(orbital_particles)\n\n if not (galaxy_index == 1 or galaxy_index == 2):\n raise ValueError('Expect either 1 or 2 in galaxy_index')\n else:\n is_g_1 = galaxy_index == 1 # True for galaxy 1 False for galaxy 2\n\n if not isinstance(m, float):\n m = self.M1_feel if is_g_1 else self.M2_feel\n\n if is_g_1:\n if not hasattr(self, '_vx1') or not hasattr(self, '_vy1') or not hasattr(self, '_x1') or not hasattr(self, '_y1'):\n raise UserWarning('Initial phase of body 1 doesn\\'t exist. Please call solve_two_body_problem() first. ' +\n '(Defaulting the initial phase of body 1 to a zero speed at the origin')\n initial_velocity_body = np.zeros((3))\n initial_position_body = np.zeros((3))\n else:\n initial_velocity_body = np.array([self._vx1[0], self._vy1[0], 0])\n initial_position_body = np.array([self._x1[0], self._y1[0], 0])\n else:\n if not hasattr(self, '_vx2') or not hasattr(self, '_vy2') or not hasattr(self, '_x2') or not hasattr(self, '_y2'):\n raise UserWarning('Initial phase of body 2 doesn\\'t exist. Please call solve_two_body_problem() first. 
' +\n '(Defaulting the initial phase of body 2 to a zero speed at the origin')\n initial_velocity_body = np.zeros((3))\n initial_position_body = np.zeros((3))\n else:\n initial_velocity_body = np.array([self._vx2[0], self._vy2[0], 0])\n initial_position_body = np.array([self._x2[0], self._y2[0], 0])\n\n # create rotation matrix\n R1 = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n R2 = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n R = np.matmul(R2, R1)\n\n galaxy_initial_condition = []\n\n for (radius, particles) in np.nditer([orbital_radius, orbital_particles]):\n angular_space = np.linspace(0, 2 * np.pi, particles, endpoint=False)\n\n # first initialise the componenets in x-y plane in the core's frame\n initial_condition = np.zeros((particles, 6))\n\n initial_condition[:, 0] = radius * np.cos(angular_space)\n initial_condition[:, 1] = radius * np.sin(angular_space)\n # initial_condition[:, 2] = np.zeros\n\n velocity = np.sqrt(self._G_feel * m / radius)\n initial_condition[:, 3] = -velocity * np.sin(angular_space)\n initial_condition[:, 4] = velocity * np.cos(angular_space)\n\n # rotate the vectors\n initial_condition[:, :3] = np.matmul(R, initial_condition[:, :3].T).T\n initial_condition[:, 3:] = np.matmul(R, initial_condition[:, 3:].T).T\n\n # galilean transform the initial velocity to the problem frame, by adding the velocity of core\n initial_condition[:, 3:] += initial_velocity_body # broadcasting\n\n # displace the origin to the location of the core, by adding the position vector of core\n initial_condition[:, :3] += initial_position_body # broadcasting\n\n galaxy_initial_condition.append(initial_condition)\n\n # TwoGalaxyProblem._galaxy#_initial_condition is a list of (n, 6) arrays.\n # Each (n, 6) array corresponds to each orbit,\n # where n is the number of particle in each orbit.\n # The 0, 1, 2 dimension of the last axis is the x, y, z component of each particle.\n # The 3, 4, 5 dimension of the last axis is the conjugate momentum of the x, y, z component of each particle.\n if is_g_1:\n self._galaxy1_initial_condition = galaxy_initial_condition\n else:\n self._galaxy2_initial_condition = galaxy_initial_condition\n\n orbital_orientation = {\n 'theta': theta,\n 'phi': phi\n }\n\n setattr(self, '_galaxy{0:d}_orbital_orientation'.format(galaxy_index), orbital_orientation)\n\n orbitals_properties = list(\n map(lambda radius, particle_number: {\n 'radius': radius,\n 'particle_number': particle_number\n }, orbital_radius, orbital_particles)\n )\n\n setattr(self, '_galaxy{0:d}_orbitals_properties'.format(galaxy_index), orbitals_properties)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = 
(cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr" ]
[ "0.59060866", "0.5602356", "0.54695225", "0.531587", "0.5250702", "0.51489925", "0.512252", "0.51054573", "0.50826424", "0.50823313", "0.50728947", "0.5068519", "0.5064257", "0.50619185", "0.504671", "0.5043761", "0.5035546", "0.5006031", "0.49904823", "0.49792513", "0.49603698", "0.49561316", "0.49539453", "0.49525517", "0.49416146", "0.49367455", "0.4935392", "0.4934906", "0.4926118", "0.49192843" ]
0.5937973
0
recover numerical values of CTF parameters from EMAN2 CTF object stored in a header of the input image
def get_ctf(ima):
    from EMAN2 import EMAN2Ctf
    ctf_params = ima.get_attr("ctf")
    return ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def extract(self,image_path):#image_path\r\n\r\n img = caffe.io.load_image(image_path)\r\n \r\n 
#image1=cv2.imread(caffe_root + 'examples/images/cat.jpg') \r\n #img=cv2.cvtColor(image1,cv2.COLOR_BGR2RGB) \r\n #img=img/255. \r\n \r\n\r\n transformed_image = self.transformer.preprocess('data', img)\r\n self.net.blobs['data'].data[...] = transformed_image\r\n ft = self.net.forward()\r\n ft = np.squeeze(ft['pool5/7x7_s1'])\r\n ft = ft / LA.norm(ft)\r\n return ft", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 
'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def test_read_with_cclib():\n main([\"-g\", \"/tmp/fnord.Gaussian.gjf\"])\n main([\"/tmp/fnord.Gaussian.gjf\", \"data/benzene.out\"])\n assert_equals(\n open(\"data/benzene.gjf\").read(),\n \"\"\"#Put Keywords Here, check Charge and Multiplicity.\n\n data/benzene.out\n\n0 1\nC 1.7458930000 1.7957530000 -1.0597530000\nC 0.9484120000 2.8689700000 -1.4311180000\nC 1.4480470000 1.0743490000 0.0876540000\nC -0.1470660000 3.2206120000 -0.6552520000\nC 0.3525690000 1.4259910000 0.8635200000\nC -0.4449120000 2.4992080000 0.4921550000\nH 2.5997410000 1.5203090000 -1.6651660000\nH 1.1810280000 3.4311430000 -2.3262420000\nH 2.0700040000 0.2375420000 0.3781170000\nH -0.7690240000 4.0574200000 -0.9457150000\nH 0.1199530000 0.8638180000 1.7586440000\nH -1.2987600000 2.7746520000 1.0975680000\n\n\"\"\",\n )", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def read_parameters():\n hdulist1 = 
pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n param1 = hdulist1[1].data['e1'][:sample]\n param2 = hdulist1[1].data['e2'][:sample]\n weights = hdulist1[1].data['weight'][:sample]\n return param1, param2, weights", "def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib", "def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f", "def parse_cif(cif_name='iso.cif'):\n with open(cif_name) as f_iso:\n content = f_iso.readlines()\n u = np.zeros(6)\n for e in [line.strip().split() for line in content if len(line.strip().split()) == 2]:\n if 'cell_length_a' in e[0]:\n u[0] = float(e[1])\n elif 'cell_length_b' in e[0]:\n u[1] = float(e[1])\n elif 'cell_length_c' in e[0]:\n u[2] = float(e[1])\n elif 'cell_angle_alpha' in e[0]:\n u[3] = float(e[1])\n elif 'cell_angle_beta' in e[0]:\n u[4] = float(e[1])\n elif 'cell_angle_gamma' in e[0]:\n u[5] = float(e[1])\n a, b, c, alpha, beta, gamma = u\n cosdelta_up = np.cos(np.radians(alpha)) - np.cos(np.radians(beta))*np.cos(np.radians(gamma))\n cosdelta_low = np.sin(np.radians(beta))*np.sin(np.radians(gamma))\n cosdelta = cosdelta_up / cosdelta_low\n sindelta = np.sqrt(1-cosdelta**2)\n la = a*np.array([1.0, 0.0, 0.0])\n lb = b*np.array([np.cos(np.radians(gamma)), np.sin(np.radians(gamma)), 0.0])\n lc = c*np.array([np.cos(np.radians(beta)), np.sin(np.radians(beta))*cosdelta,\n np.sin(np.radians(beta))*sindelta])\n u_lc = lc/np.linalg.norm(lc)\n theta_c_rad = np.arccos(np.clip(np.dot(u_lc, [0, 0, 1]), -1.0, 1.0))\n return la, lb, lc, theta_c_rad", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, 
n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def processCerFile(k, fb, newcorex=None, newcorey=None, sz=0):\n\n #---- Read Event Header\n evth = unpack('{}f'.format(evthSize), fb.read(evthSize * wordSize))\n #print(evth)\n\n primary = get_primary(evth)\n energy = get_energy(evth)\n height = get_height_first(evth)\n thetaEvtH, phiEvtH = get_direction(evth)\n coreX, coreY, coreD = get_core(evth)\n\n print('{:4d} {:3d} {:9d} {:6.1f} {:8.1f} {:7.1f} {:7.1f} {:8.1f} {:5.1f} {:5.1f}'\n .format(k, int(primary), sz, energy, height, coreX, coreY, coreD, thetaEvtH, phiEvtH))\n\n return\n\n #---- Read Cherenkov photons from file\n\n wl = 999.\n i = 0\n\n while wl > 0.5:\n cphotonData = fb.read(cphotonSize * wordSize)\n \n i = i + 1\n wl, x, y, u, v, t, h = unpack('{}f'.format(cphotonSize), cphotonData)\n w = sqrt(1.0 - u ** 2 - v ** 2)\n \n if wl < 1.:\n continue\n\n wl = wl - 101000.\n\n print('{} {} {:.2f} {:.2f} {:.2f} {:.6f} {:.6f} {:.6f} {:.8f} {:.2f}'\n .format(k, i, wl, x, y, u, v, w, t, h))", "def extract_anisotropy_features (Parameters, image, mask=None):\n \n data_inputs = {}\n \n Ka, Kb, Kc = Parameters.kA, Parameters.kB, Parameters.kC\n \n \n h, w, channels = image.shape\n \n if channels == 2:\n channel_types = [\"Para\", \"Perp\"]\n elif channels == 3:\n channel_types = [\"Open\", \"Para\", \"Perp\"]\n \n \n for index, channel in enumerate(channel_types):\n \n data_inputs[channel] = np.sum(image[:,:, index])/np.count_nonzero(image[:,:, index])\n\n\n #Additional parameters\n para_value = data_inputs['Para']\n perp_value = data_inputs['Perp']\n data_inputs['AniAvg'] = (para_value - perp_value)/(para_value + 2*perp_value)\n \n #With corrections\n data_inputs['Ix'] = Ix = ((Ka+Kb)*perp_value - (Ka+Kc)*para_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['Iy'] = Iy = (Kb*para_value - Kc*perp_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['AniAvg'] = (Ix - Iy)/(Ix + 2*Iy)\n \n\n \n return (data_inputs)", "def extractNOTCAMHeader(file):\n\n try:\n\n hdulist = pyfits.open(file)\n hdulist.close() \n \n if len(hdulist) > 0:\n prihdr = hdulist[0].header\n a = ['notcam','NC',prihdr]\n for i in range(1, len(hdulist)):\n a.append(hdulist[i].header)\t\n return a \n else:\n return ['ERROR']\n\n # Error\n\n except Exception, e:\n raise HeaderException(e)", "def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n 
BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = 
glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n return mpart, Lbox, rsdfac, acheck\n #", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop 
time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': 
{'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 
'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': 
{'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': 
{'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n 
dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return", "def bvec_errorprop(header,fld,inc,azi,err_fld,err_inc,err_azi,cc_fi,cc_fa,cc_ia):\n # Get parameters from header\n crpix1 = header['CRPIX1']\n crpix2 = header['CRPIX2']\n cdelt1 = header['CDELT1']\n cdelt2 = header['CDELT2']\n crval1 = header['CRVAL1']\n crval2 = header['CRVAL2']\n rsun_obs = header['RSUN_OBS']\t#solar disk radius in arcsec\n crota2 = header['CROTA2']\t\t#neg p-angle\n crlt_obs = header['CRLT_OBS']\t#disk center latitude\n\n nx0 = fld.shape[1]\n ny0 = fld.shape[0]\n\n # Get longitude/latitude\n xi = np.zeros((ny0,nx0))\n eta = np.zeros((ny0,nx0))\n for i in range(nx0):\n xi[:,i] = ((i + 1 - crpix1)*cdelt1 + crval1)/rsun_obs\n for j in range(ny0):\n eta[j,:] = ((j + 1 - crpix2)*cdelt2 + crval2)/rsun_obs\n\n lat,lon = img2sph(xi,eta,lonc=0.0,latc=np.radians(crlt_obs),\n asd=np.radians(rsun_obs/3.6e3),pa=np.radians(-1*crota2))\n\n latc = np.radians(crlt_obs)\n lonc = 0.0\n pAng = np.radians((-1.0) * crota2)\n\n a11 = (-np.sin(latc)*np.sin(pAng)*np.sin(lon - lonc)\n + np.cos(pAng)*np.cos(lon - lonc))\n a12 = (np.sin(latc)*np.cos(pAng)*np.sin(lon - lonc)\n + np.sin(pAng)*np.cos(lon - lonc))\n a13 = (-np.cos(latc)*np.sin(lon - lonc))\n a21 = (-np.sin(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc)\n + np.cos(pAng)*np.sin(lon - lonc))\n - np.cos(lat)*np.cos(latc)*np.sin(pAng))\n a22 = (np.sin(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc)\n - np.sin(pAng)*np.sin(lon - lonc))\n + np.cos(lat)*np.cos(latc)*np.cos(pAng))\n a23 = (-np.cos(latc)*np.sin(lat)*np.cos(lon - lonc)\n + np.sin(latc)*np.cos(lat))\n a31 = (np.cos(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc)\n + np.cos(pAng)*np.sin(lon - lonc))\n - np.sin(lat)*np.cos(latc)*np.sin(pAng))\n a32 = (-np.cos(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc)\n - np.sin(pAng)*np.sin(lon - lonc))\n + np.sin(lat)*np.cos(latc)*np.cos(pAng))\n a33 = (np.cos(lat)*np.cos(latc)*np.cos(lon - lonc)\n + np.sin(lat)*np.sin(latc))\n\n # Sine/cosine\n sin_inc = np.sin(inc)\n cos_inc = np.cos(inc)\n sin_azi = np.sin(azi)\n cos_azi = np.cos(azi)\n\n # Covariance\n var_fld = err_fld * err_fld\n var_inc = err_inc * err_inc\n var_azi = err_azi * err_azi\n cov_fi = err_fld * err_inc * cc_fi\n cov_fa = err_fld * err_azi * cc_fa\n cov_ia = err_inc * err_azi * cc_ia\n\n # Partial derivatives\n dBp_dfld = (-a11*sin_inc*sin_azi + a12*sin_inc*cos_azi + a13*cos_inc)\n dBp_dinc = (-a11*cos_inc*sin_azi + a12*cos_inc*cos_azi - a13*sin_inc)*fld\n dBp_dazi = (-a11*sin_inc*cos_azi - a12*sin_inc*sin_azi)*fld\n\n dBt_dfld = (-a21*sin_inc*sin_azi + a22*sin_inc*cos_azi + a23*cos_inc)*(-1)\n dBt_dinc = (-a21*cos_inc*sin_azi + a22*cos_inc*cos_azi - a23*sin_inc)*fld*(-1)\n dBt_dazi = (-a21*sin_inc*cos_azi - a22*sin_inc*sin_azi)*fld*(-1)\n\n dBr_dfld = (-a31*sin_inc*sin_azi + a32*sin_inc*cos_azi + a33*cos_inc)\n dBr_dinc = (-a31*cos_inc*sin_azi + a32*cos_inc*cos_azi - 
a33*sin_inc)*fld\n dBr_dazi = (-a31*sin_inc*cos_azi - a32*sin_inc*sin_azi)*fld\n\n # Final variances\n var_bp = (dBp_dfld*dBp_dfld*var_fld\n + dBp_dinc*dBp_dinc*var_inc\n + dBp_dazi*dBp_dazi*var_azi\n + 2*dBp_dfld*dBp_dinc*cov_fi\n + 2*dBp_dfld*dBp_dazi*cov_fa\n + 2*dBp_dinc*dBp_dazi*cov_ia)\n\n var_bt = (dBt_dfld*dBt_dfld*var_fld\n + dBt_dinc*dBt_dinc*var_inc\n + dBt_dazi*dBt_dazi*var_azi\n + 2*dBt_dfld*dBt_dinc*cov_fi\n + 2*dBt_dfld*dBt_dazi*cov_fa\n + 2*dBt_dinc*dBt_dazi*cov_ia)\n\n var_br = (dBr_dfld*dBr_dfld*var_fld\n + dBr_dinc*dBr_dinc*var_inc\n + dBr_dazi*dBr_dazi*var_azi\n + 2*dBr_dfld*dBr_dinc*cov_fi\n + 2*dBr_dfld*dBr_dazi*cov_fa\n + 2*dBr_dinc*dBr_dazi*cov_ia)\n\n return var_bp,var_bt,var_br", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n if np.abs(acheck-aa)>1e-4:\n raise RuntimeError(\"Read a={:f}, expecting {:f}.\".format(acheck,aa))\n return(rsdfac)\n #", "def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. 
Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. 
You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR", "def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def getCl(filename):\n powSpec = pf.getdata(filename,1)\n temps = powSpec.field('TEMPERATURE')\n ell = np.arange(temps.size)\n return ell,temps", "def test_reads_photomodeler_sigmas_from_report() -> None:\n imgsz = (4288, 2848)\n sigmas = {\n \"focal\": 0.001,\n \"xp\": 0.001,\n \"yp\": 7.1e-004,\n \"fw\": 1.7e-004,\n \"fh\": 0.0,\n \"k1\": 2.0e-007,\n \"k2\": 1.2e-009,\n \"k3\": 0.0,\n \"p1\": 3.5e-007,\n \"p2\": 0.0,\n }\n path = os.path.join(\"tests\", \"CalibrationReport.txt\")\n xcam_auto = PhotoModeler.from_report(path, imgsz=imgsz, sigmas=True)\n xcam_manual = PhotoModeler(imgsz=imgsz, **sigmas)\n assert vars(xcam_auto) == vars(xcam_manual)", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n 
flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_contrast < 1.0:\n\t\tfrom math import sqrt\n\t\tamp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)\n\n\tctf = EMAN2Ctf()\n\tif(len(p) == 6):\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast})\n\telse:\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast,'dfdiff':p[6],'dfang':p[7]})\n\t\t\n\treturn ctf", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def read_camera_params(h5_dataset):\n fx = h5_dataset[0]\n fy = h5_dataset[1]\n skew = h5_dataset[2]\n cx = h5_dataset[3]\n cy = h5_dataset[4]\n K = np.array([[fx, skew, cx],\n [0, fy, cy],\n [0, 0, 1]], dtype=np.float64)\n R = np.array([[h5_dataset[5], h5_dataset[8], h5_dataset[11]],\n [h5_dataset[6], h5_dataset[9], h5_dataset[12]],\n [h5_dataset[7], h5_dataset[10], h5_dataset[13]]], dtype=np.float64)\n t = np.array([h5_dataset[14], h5_dataset[15], h5_dataset[16]], dtype=np.float64)\n return K, R, t" ]
[ "0.56780773", "0.56284475", "0.55668586", "0.5510405", "0.5448828", "0.5425909", "0.5414353", "0.54138607", "0.541011", "0.54011285", "0.5374928", "0.5361909", "0.5342145", "0.533246", "0.5314134", "0.5313142", "0.525259", "0.52504194", "0.5208928", "0.520613", "0.5202701", "0.51924586", "0.51777726", "0.51571214", "0.51507735", "0.512887", "0.5126869", "0.5121457", "0.5117099", "0.510186" ]
0.60774076
0
generate EMAN2 CTF object using values of CTF parameters given in the list p
def generate_ctf(p):
	from EMAN2 import EMAN2Ctf

	defocus = p[0]
	cs = p[1]
	voltage = p[2]
	pixel_size = p[3]
	bfactor = p[4]
	amp_contrast = p[5]

	if defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention
		defocus *= 1e-4

	if amp_contrast < 1.0:
		from math import sqrt
		amp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)

	ctf = EMAN2Ctf()
	if(len(p) == 6):
		ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast})
	else:
		ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast,'dfdiff':p[6],'dfang':p[7]})

	return ctf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_thermo(T_C, p):\n\ty = y_from_p(p)\n\tx = x_from_Tp(T_C+C_to_K, p)\n\treturn x, y", "def construct_param_dict(params,K_RC,K_CP,m_P):\n ###scaling constants\n w=params['w']\n pd=params['pd'] # in 3D and 0.21 in 2D\n pv=params['pv']\n Er=params['Er'] ;Ek=params['Ek']\n ER=params['ER'];EC=params['EC'];EP=params['EP'];\n Eq1=params['Eq1'];Eq2=params['Eq2']\n\n\n #capture success function\n a = params['a']\n b = params['b']\n c = params['c']\n formC = params['formC']\n formPC = params['formPC']\n formPR = params['formPR']\n \n ###variables\n TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']\n K_RP=K_RC*K_CP\n fmC=params['fmC'];thermyR=params['thermyR']\n thermyC=params['thermyC'];thermyP=params['thermyP']\n fmPR=params['fmPR']\n fmPC=params['fmPC']\n m_C = K_CP*m_P;m_R = K_RP*m_P\n ###normalization constants and boltzmann constant\n r0 = params['r0']\n k0 = params['k0'] # will depend on the productivity of the habitat\n a01 = a02 = params['a012'] # will depedend on the dimension of the habitat \n a03 = params['a03']\n d0= params['d0']\n q10 = params['q10'];q20 = params['q20'];\n v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k\n hC0 = params['hC0'];hP0 = params['hP0'] \n \n #intrapopulation parameters\n q1=set_q1(q10,m_C,w,Eq1,TR,k)\n q2=set_q2(q20,m_P,w,Eq2,TC,k)\n K=set_K(k0,m_R,w,Ek,TR,k)\n r=set_r(r0,m_R,w,Er,TR,k)\n\n #interpopulation parameters\n a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)\n a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)\n a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)\n\n t_hp = set_th(hP0,m_P,w,EP,k,TP)\n t_hc = set_th(hC0,m_C,w,EC,k,TC)\n param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}\n \n return param_dict", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 
/(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n 
self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def second_class_tp(p,n):\n c = np.zeros(n)\n d = np.zeros(p)\n ucon = np.zeros(n)\n lcon = np.zeros(n)\n \n #uvar = np.ones(n)*1\n uvar = np.ones(n)*5\n lvar = np.ones(n)*0.5\n name = str(p)+'_'+str(n)+'_'+str(n)+'_l1_tp'+'.txt'\n #name = str(n)+'_'+str(p)+'_'+'_second_tp'+'.txt'\n Q = rog.hilb(p,n)\n # d=(di), di=sum qij for i= 1,...,p\n for i in range(p): \n d[i]= Q[i,:].sum()\n B = np.zeros((n,n))\n return Q,B,d,c,lcon,ucon,lvar,uvar,name", "def 
addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n #### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n \n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n 
model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return", "def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)", "def set_ctf(ima, p):\n\tfrom utilities import generate_ctf\n\tctf = generate_ctf( p )\n\tima.set_attr( \"ctf\", ctf )", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def gen_params(no_cultures):\n # Plate level\n kn = 0.1 # Nutrient diffusion\n ks = 0.1 # Signal diffusion\n b = 0.05 # Signal on cells effect constant\n a = 0.05 # Signal secretion constant\n # Culture level\n # Growth rate constant\n r_mean = 1.0\n r_var = 1.0\n r_params = [max(0.0, gauss(r_mean, r_var)) for i in range(no_cultures)]\n params = np.array([kn, ks, b, a] + r_params)\n return params", "def gen_reaction(tabs):\n global pbeam\n pbeam = TLorentzVector(0, 0, Ebeam, Ebeam)\n global ptarg\n ptarg = TLorentzVector(0, 0, 0, m_proton)\n pinitial = pbeam + ptarg\n global s\n s = pinitial.Mag2()\n q_in = (s - m_proton**2) / (2 * math.sqrt(s))\n q_cm = math.sqrt((s - m_proton**2 + m_omega**2)**2 / (4 * s) - m_omega**2)\n EomegaCM = math.sqrt(m_omega**2 + q_cm**2)\n EprotonCM = math.sqrt(m_proton**2 + q_cm**2)\n costhetaCM = (2 * q_in * EomegaCM - m_omega**2 - tabs) / (2 * q_in * q_cm)\n if abs(costhetaCM) > 1:\n print \"tabs =\", tabs, \"is out of range, please try another value\"\n return 0\n costheta0 = random.Uniform(-1, 1)\n phi0 = random.Uniform(-math.pi, math.pi)\n costheta1 = random.Uniform(-1, 1)\n phi1 = random.Uniform(-math.pi, math.pi)\n pomega = gen_omega(costheta0, phi0, costheta1, phi1)\n sinthetaCM = math.sqrt(1 - costhetaCM**2)\n beta = TVector3(q_cm * sinthetaCM, 0, q_cm * costhetaCM) * (1 / EomegaCM)\n pomega.Boost(beta)\n pgamma[0].Boost(beta)\n pgamma[1].Boost(beta)\n pgamma[2].Boost(beta)\n global precoil\n precoil = TLorentzVector(-q_cm * sinthetaCM, 0, -q_cm * costhetaCM, EprotonCM)\n betaCM = pinitial.Vect() * (1 / pinitial[3])\n pgamma[0].Boost(betaCM)\n pgamma[1].Boost(betaCM)\n pgamma[2].Boost(betaCM)\n pomega.Boost(betaCM)\n precoil.Boost(betaCM)\n return pomega", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, buck=None, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n if buck is None:\n self.buck_pms = []\n else:\n self.buck_pms = [] # TODO:\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, 
self.eps_sh,\n self.eps_ult, *self.buck_pms, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def new_param_energy(coords, params, topology, vecs, P=1.01, T=293.15,NPT=False,V=None,P_conv=1.e5,V_conv=1.e-6,Ener_conv=1.e-3,N_part=250.):\n\n #-------------------\n # CONSTANTS\n #-------------------\n kB = 0.0083145 #Boltzmann constant (Gas constant) in kJ/(mol*K)\n beta = 1/(kB*T)\n\n #-------------------\n # PARAMETERS\n #-------------------\n params = params\n\n mol2files = []\n for i in params:\n mol2files.append('../monomers/'+i.rsplit(' ',1)[0]+'.mol2')\n\n flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield\n mols = []\n mol = oechem.OEMol()\n for mol2file in mol2files:\n ifs = oechem.oemolistream(mol2file)\n ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)\n mol = oechem.OEGraphMol()\n while oechem.OEReadMolecule(ifs, mol):\n oechem.OETriposAtomNames(mol)\n mols.append(oechem.OEGraphMol(mol))\n K = len(params['cyclohexane'].keys())\n \n # Load forcefield file\n #ffxml = 'smirnoff99Frosst_with_AllConstraints.ffxml'#\n #print('The forcefield being used is smirnoff99Frosst_with_AllConstraints.ffxml')\n ffxml = get_data_filename('forcefield/smirnoff99Frosst.ffxml')\n print('The forcefield being used is smirnoff99Frosst.ffxml')\n\n ff = ForceField(ffxml)\n\n # Generate a topology\n top = topology#generateTopologyFromOEMol(mol)\n\n #-----------------\n # MAIN\n #-----------------\n\n # Calculate energies\n\n E_kn = np.zeros([K,len(coords)],np.float64)\n u_kn = np.zeros([K,len(coords)],np.float64)\n for i,j in enumerate(params):\n AlkEthOH_id = j\n for k,l in enumerate(params[AlkEthOH_id]):\n print(\"Anotha one\")\n for m,n in enumerate(params[AlkEthOH_id][l]):\n newparams = ff.getParameter(smirks=n[0])\n newparams[n[1]]=n[2]\n ff.setParameter(newparams,smirks=n[0])\n system = ff.createSystem(top,mols,nonbondedMethod=PME,nonbondedCutoff=1.125*nanometers,ewaldErrorTolerance=1.e-5)\n barostat = MonteCarloBarostat(P*bar, T*kelvin, 10)\n system.addForce(barostat)\n for o,p in enumerate(coords):\n e = get_energy(system,p,vecs[o])\n \n if not NPT:\n E_kn[k,o] = e._value\n u_kn[k,o] = e._value*beta\n else:\n E_kn[k,o] = e._value + P*P_conv*V[o]*V_conv*Ener_conv*N_part\n u_kn[k,o] = (e._value + P*P_conv*V[o]*V_conv*Ener_conv*N_part)*beta\n \n return E_kn,u_kn", "def __init__(self, coefficient, basefield=None):\n\n # parameter parse\n try:\n character = basefield.getCharacteristic()\n field = basefield\n except AttributeError:\n # backward compatibility\n if isinstance(basefield, int):\n field = finitefield.FinitePrimeField.getInstance(basefield)\n character = basefield\n else:\n raise ValueError(\"basefield must be FiniteField object.\")\n\n coeffs_list = []\n if isinstance(coefficient, list):\n for c in coefficient:\n if isinstance(c, int):\n coeff = field.createElement(c)\n elif c in field:\n coeff = c\n else:\n raise ValueError(\"coefficient not in basefield.\")\n coeffs_list.append(coeff)\n\n # general initialize\n ECGeneric.__init__(self, coeffs_list, field)\n\n zero = self.basefield.zero\n one = self.basefield.one\n\n # format attribute\n if self.ch == 2:\n if len(self) == 5:\n # FIXME\n if coeffs_list[0] % 2 == one and coeffs_list[2] % 2 == coeffs_list[3] % 2 == zero and coeffs_list[4]:\n self.a1 = one\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = one\n self.b4 = zero\n self.b6 = zero\n self.b8 = self.a6\n self.c4 = 
one\n self.c6 = one\n self.disc = self.a6\n self.j = self.disc.inverse()\n elif coeffs_list[0] % 2 == coeffs_list[1] % 2 == zero and coeffs_list[2]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = zero\n self.b6 = self.a3**2\n self.b8 = self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = self.a3**4\n self.j = zero\n else:\n raise ValueError(\"coefficient may be not representation of EC.\")\n else:\n raise ValueError(\"coefficient may only use full Weierstrass form for characteristic 2.\")\n elif self.ch == 3: # y^2=x^3+a2*x^2+a6 or y^2=x^3+a4*x+a6\n # FIXME\n if len(self) == 5:\n if coeffs_list[0] % 3 == coeffs_list[2] % 3 == coeffs_list[3] % 3 == 0 and coeffs_list[1] and coeffs_list[4]:\n self.a1 = zero\n self.a2 = coeffs_list[1]\n self.a3 = zero\n self.a4 = zero\n self.a6 = coeffs_list[4]\n self.b2 = self.a2\n self.b4 = zero\n self.b6 = self.a6\n self.b8 = self.a2*self.a6\n self.c4 = self.b2**2\n self.c6 = 2*self.b2**3\n self.disc = -self.a2**3*self.a6\n self.j = (-self.a2**3)*self.a6.inverse()\n elif coeffs_list[0] == coeffs_list[1] == coeffs_list[2] == 0 and coeffs_list[3]:\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = zero\n self.b4 = 2*self.a4\n self.b6 = self.a6\n self.b8 = 2*self.a4**2\n self.c4 = zero\n self.c6 = zero\n self.disc = -self.a4**3\n self.j = zero\n else:\n raise ValueError(\"can't defined EC.\")\n if not self.disc:\n raise ValueError(\"this curve is singular.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n else:\n if len(self) == 5:\n self.a1 = coeffs_list[0]\n self.a2 = coeffs_list[1]\n self.a3 = coeffs_list[2]\n self.a4 = coeffs_list[3]\n self.a6 = coeffs_list[4]\n self.b2 = self.a1**2+4*self.a2\n self.b4 = self.a1*self.a3+2*self.a4\n self.b6 = self.a3**2+4*self.a6\n self.b8 = self.a1**2*self.a6+4*self.a2*self.a6-self.a1*self.a3*self.a4+self.a2*self.a3**2-self.a4**2\n self.c4 = self.b2**2-24*self.b4\n self.c6 = -self.b2**3+36*self.b2*self.b4-216*self.b6\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n elif len(self) == 2:\n self.a = coeffs_list[0]\n self.b = coeffs_list[1]\n self.a1 = zero\n self.a2 = zero\n self.a3 = zero\n self.a4 = self.a\n self.a6 = self.b\n self.b2 = zero\n self.b4 = 2*self.a\n self.b6 = 4*self.b\n self.b8 = -(self.a**2)\n self.c4 = -48*self.a\n self.c6 = -864*self.b\n self.disc = -self.b2**2*self.b8-8*self.b4**3-27*self.b6**2+9*self.b2*self.b4*self.b6\n if self.disc:\n self.j = self.c4**3*self.disc.inverse()\n else:\n raise ValueError(\"coefficients creates singular curve.\")\n else:\n raise ValueError(\"coefficient is less or more, can't defined EC.\")\n\n self.ord = None\n self.abelian = None\n self.cubic = UniVarPolynomial({0:self.a6, 1:self.a4, 2:self.a2, 3:one},\n self.basefield)", "def generation(hid_pl, f_state, eps_z, eps_x, pd, fd):\n params_prior = fd['phi_prior'](hid_pl)\n z = sample(params_prior, eps_z, 'gauss')\n phi_z = fd['phi_z'](z)\n params_out = fd['phi_dec'](phi_z, hid_pl)\n x = sample(params_out, eps_x, pd['model'])\n\n phi_x = fd['phi_x'](x)\n f_in = tf.concat([phi_x, phi_z], axis=1, name='f_theta_joint_inputs')\n f_out, f_state = fd['f_theta'](f_in, f_state)\n return x, f_out, f_state", "def eff_param():\n return r.TF1('photon_eff_param', 
eff_param_string(), 0, 7)", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n fout.write('@ATTRIBUTE MEAN_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD3\tREAL\\n')\n 
fout.write('@ATTRIBUTE STD_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n \n fout.write('@DATA\\n')\n\n ## cua so\n windowing = Windowing(type='hamming',\n size=1104,\n zeroPhase=False)\n \n ## quang pho\n spectrum = Spectrum(size=1104)\n\n ##khoi tao MFCC\n mfcc = MFCC(highFrequencyBound=4000, ## gioi han tren cua tan so\n inputSize=201, \t\t\t ## kich thuoc pho dau vao\n lowFrequencyBound=0,\t ## gioi han duoi cua tan so\n numberBands=40,\t\t\t ## so luong cac dai Mels trong bo loc\n numberCoefficients=13, ## so luong dau ra cac he so Mel\n sampleRate=16000)\t\t ## tan so lay mau\n\n for label in ARGS.labels.split(','): ## duyet cac thu muc giong voi ten nhan\n\n ## dia chi thu muc\n dir = os.path.join(ARGS.dir, label)\n\n logging.info('Access folder <{}>'.format(dir))\n\n for file in sorted(os.listdir(dir)):\n\n \t## duyet cac file .wav\n if file.endswith('.wav'):\n logging.info('Process <{}>'.format(file))\n path = os.path.join(dir, file)\n \n ## doc file am thanh\n loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n audio = loader()\n cnt = 0\n\n for window in FrameGenerator(audio, \n frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n mfccs = []\n for frame in FrameGenerator(window, \n frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n s = spectrum(windowing(frame))\n\n _, m = mfcc(s)\n\n m_delta = librosa.feature.delta(m, order=1) ## dao ham bac 1\n m_delta_delta = librosa.feature.delta(m, order=2) ## dao ham bac 2\n\n m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0) ## them vao chuoi\n mfccs.append(m_all)\n mfccs = np.array(mfccs)\n mfccs_mean = np.mean(mfccs, axis=0)\n mfccs_std = np.std(mfccs, axis=0)\n feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n str_feat = [str(x) for x in feat]\n line = ','.join(str_feat)+','+label\n fout.write(line+'\\n')\n cnt = cnt+1\n logging.info('{} samples'.format(cnt))", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? 
Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. #\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the 
covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for 
computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def C_factory(P, n=2, V_type=\"clamped\"):\n\n # TODO: check that p_len is ok with the degree and > 0\n m = len(P) # the number of points in P\n D = len(P[0]) # the dimension of a point (2D, 3D)\n\n # Create the knot vector\n V = make_knot_vector(n, m, V_type)\n # TODO: check the validity of the input knot vector.\n # TODO: create an initial Vector Point.\n\n #############################################################################\n # The following line will be detailed later. #\n # We create the highest degree basis spline function, aka. our entry point. #\n # Using the recursive formulation of b-splines, this b_n will call #\n # lower degree basis_functions. b_n is a function. 
#\n #############################################################################\n b_n = basis_factory(n)\n\n @memoize\n def S(t, d):\n \"\"\" The b-spline funtion, as defined in eq. 3. \"\"\"\n out = 0.\n for i in range(m): #: Iterate over 0-indexed point indices\n out += P[i][d]*b_n(t, i, V)\n return out\n\n def C(t):\n \"\"\" The b-spline curve, as defined in eq. 4. \"\"\"\n out = [0.]*D #: For each t we return a list of D coordinates\n for d in range(D): #: Iterate over 0-indexed dimension indices\n out[d] = S(t,d)\n return out\n\n C.P = P #: The control polygone\n C.V = V #: The knot vector used by the function\n C.spline = S #: The spline function.\n C.basis = b_n #: The highest degree basis function. Useful to do some plotting.\n C.min = V[0] #: The domain of definition of the function, lower bound for t\n C.max = V[-1] #: The domain of definition of the function, upper bound for t\n C.endpoint = C.max!=V[-1] #: Is the upper bound included in the domain.\n return C", "def metamer(p):\r\n return Components(p, Scale=3)", "def generate_parameters(nid):\n G = EcGroup(nid)\n g = G.hash_to_point(b\"g\")\n o = G.order()\n return (g, o)", "def make_features(targs_pb, pf):\n camera, to_uvd, to_world, keys_uvd, _, visible, _ = utils.get_contents_pb(\n targs_pb.kp_target)\n num_kp = len(keys_uvd)\n # Restrict to max projection targets\n proj_targs = [\n utils.get_contents_pb(targ_pb) for targ_pb in targs_pb.proj_targets\n ][:utils.MAX_TARGET_FRAMES]\n targets_keys_uvd = []\n targets_to_uvd = []\n for proj_targ in proj_targs:\n _, to_uvd, _, keys_uvd, _, _, _ = proj_targ\n targets_keys_uvd.append(keys_uvd)\n targets_to_uvd.append(to_uvd)\n # Add dummy targets if necessary.\n num_targets = len(proj_targs)\n for _ in range(utils.MAX_TARGET_FRAMES - num_targets):\n targets_keys_uvd.append(utils.dummy_keys_uvd(num_kp))\n targets_to_uvd.append(utils.dummy_to_uvd())\n\n def feat_int(num):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[num]))\n\n def feat_floats(floats):\n return tf.train.Feature(float_list=tf.train.FloatList(value=floats))\n\n feats = {\n 'to_world_' + pf:\n feat_floats(to_world.flatten()),\n 'to_uvd_' + pf:\n feat_floats(to_uvd.flatten()),\n 'camera_' + pf:\n feat_floats(utils.cam_pb_to_array(camera)),\n 'keys_uvd_' + pf:\n feat_floats(np.array(keys_uvd).flatten()),\n 'visible_' + pf:\n feat_floats(visible),\n 'num_kp_' + pf:\n feat_int(num_kp),\n 'num_targets_' + pf:\n feat_int(num_targets),\n 'targets_to_uvd_' + pf:\n feat_floats(np.array(targets_to_uvd).flatten()),\n 'targets_keys_uvd_' + pf:\n feat_floats(np.array(targets_keys_uvd).flatten()),\n 'mirrored':\n feat_int(int(targs_pb.mirrored)),\n }\n return feats", "def gen_parameter(self, g, ng, p):\n pass", "def get_m_eng_body(f_eng_body, P):\n m = np.zeros(3)\n for i in range(0, P.eng_nb):\n m += np.cross(P.eng_pos[i], f_eng_body[i])\n return m", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, lsr, alpha=1.0, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.lsr = float(lsr)\n self.alpha = float(alpha)\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = 
self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh, self.eps_ult, '-DMBuck', self.lsr, self.alpha, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def PyC_Punto(P):\n C = CPunto()\n\n C.x[0] = P['x'][0]\n C.x[1] = P['x'][1]\n C.x[2] = P['x'][2]\n \n C.y[0] = P['y'][0]\n C.y[1] = P['y'][1]\n C.y[2] = P['y'][2]\n\n C.D = P['D']\n \n C.n = P['n']\n C.a, C.b, C.c = P['a'], P['b'], P['c']\n C.d, C.e, C.f = P['d'], P['e'], P['f']\n\n return C", "def 
doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def test_create(self):\n f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)\n\n f.set_params(k=5.0)\n f.set_params(k=8)\n\n f.set_params(point=(0,0,1))\n f.set_params(point=[0,0,1])\n f.set_params(point=np.array([0,0,1]))\n\n f.set_params(normal=(0,0,1))\n f.set_params(normal=[0,0,1])\n f.set_params(normal=np.array([0,0,1]))\n\n f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)", "def tcs(self,lpf=0, opf=1):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n \n # store initial beam data\n self.LD[0][0] = S.pos\n\n #Mean data\n self.LD[0][1] = S.moment0_env[0]\n self.LD[0][2] = S.moment0_env[2]\n self.LD[0][3] = S.moment0_env[4]\n self.LD[0][4] = S.moment0_rms[0]\n self.LD[0][5] = S.moment0_rms[2]\n self.LD[0][6] = S.moment0_rms[4]\n self.LD[0][7] = S.ref_phis\n self.LD[0][8] = S.ref_IonEk\n\n # store initial beam data\n self.LD2[0][0] = S.pos\n #Mean data\n self.LD2[0][1] = S.moment0_env[1]\n self.LD2[0][2] = S.moment0_env[3]\n self.LD2[0][3] = S.moment0_env[5]\n self.LD2[0][4] = S.moment0_rms[1]\n self.LD2[0][5] = S.moment0_rms[3]\n self.LD2[0][6] = S.moment0_rms[5]\n\n\n # propagate step by step and store beam data\n for i in range(1,len(self.M)):\n self.M.propagate(S, i, 1)\n \n \n self.LD[i][0] = S.pos\n #Mean data\n self.LD[i][1] = S.moment0_env[0]\n self.LD[i][2] = S.moment0_env[2]\n self.LD[i][3] = S.moment0_env[4]\n self.LD[i][4] = S.moment0_rms[0]\n self.LD[i][5] = S.moment0_rms[2]\n self.LD[i][6] = S.moment0_rms[4]\n self.LD[i][7] = S.ref_phis\n self.LD[i][8] = S.ref_IonEk\n\n self.LD2[i][0] = S.pos\n #Mean data\n self.LD2[i][1] = S.moment0_env[1]\n self.LD2[i][2] = S.moment0_env[3]\n self.LD2[i][3] = S.moment0_env[5]\n self.LD2[i][4] = S.moment0_rms[1]\n self.LD2[i][5] = S.moment0_rms[3]\n self.LD2[i][6] = 
S.moment0_rms[5]\n\n #output data for plotting\n if opf: np.savetxt('ldata.txt',self.LD)\n\n if not lpf: return S" ]
[ "0.5515874", "0.5445426", "0.5421652", "0.54088384", "0.5321685", "0.5316981", "0.5300015", "0.52771187", "0.52574724", "0.5255685", "0.52163506", "0.5180207", "0.5175576", "0.51701", "0.5159775", "0.51574296", "0.51267034", "0.51238847", "0.5120029", "0.5111619", "0.5101273", "0.50828934", "0.5080192", "0.5072543", "0.5065332", "0.5045554", "0.50391465", "0.5030391", "0.5012922", "0.5011559" ]
0.77108574
0
set EMAN2 CTF object in the header of input image using values of CTF parameters given in the list p
def set_ctf(ima, p): from utilities import generate_ctf ctf = generate_ctf( p ) ima.set_attr( "ctf", ctf )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_contrast < 1.0:\n\t\tfrom math import sqrt\n\t\tamp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)\n\n\tctf = EMAN2Ctf()\n\tif(len(p) == 6):\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast})\n\telse:\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast,'dfdiff':p[6],'dfang':p[7]})\n\t\t\n\treturn ctf", "def setheaders(f):\n f.headers['OBSERVER'] = \"'%s'\" % camera.status.observer\n f.headers['FILTERID'] = \"'%s'\" % filtname(camera.status.filter)\n f.headers['FILTER'] = \"%1d\" % camera.status.filter\n f.headers['XYSTAGE'] = \"'%d,%d'\" % camera.status.guider\n f.headers['MIRROR'] = \"'%s'\" % camera.status.mirror\n if camera.status.imgtype == 'BIAS':\n f.headers['BIAS'] = camera.status.object\n elif camera.status.imgtype == 'DARK':\n f.headers['DARK'] = camera.status.object\n else:\n f.headers['OBJECT'] = camera.status.object\n try:\n skytemp = weather.status.skytemp\n f.headers['SKYTEMP'] = \"%4.1f\" % skytemp\n f.comments['SKYTEMP'] = \"'Infrared sky temp in degC'\"\n except:\n pass\n\n try:\n if not camera.status.TJ.current.posviolate: #Position calibrated to epoch\n ra = camera.status.TJ.current.Ra/15/3600\n dec = camera.status.TJ.current.Dec/3600\n epoch = camera.status.TJ.current.Epoch\n alt = camera.status.TJ.current.Alt\n GotTJ = True\n elif camera.status.TJ.current.RaC:\n ra = camera.status.TJ.current.RaC\n dec = camera.status.TJ.current.DecC\n alt = camera.status.TJ.current.Alt\n t = time.gmtime()\n epoch = t.tm_year + (t.tm_yday/366.0)\n GotTJ = True\n else:\n GotTJ = False\n except AttributeError:\n GotTJ = False \n if GotTJ:\n f.headers['RA_OBJ'] = \"%12.9f\" % (ra*15.0)\n f.headers['RA'] = \"'%s'\" % sexstring(ra)\n f.headers['DEC_OBJ'] = \"%13.9f\" % dec\n f.headers['DEC'] = \"'%s'\" % sexstring(dec)\n f.headers['EQUINOX'] = \"%6.1f\" % epoch\n f.headers['SECZ'] = \"%6.3f\" % (1/math.cos((90-alt)*math.pi/180))\n if GotFT:\n hjd,message = fitstime.findtime(fimage=f, verbose=0, allfields=0)\n if type(hjd) == float:\n f.headers['HJD'] = \"%f\" % hjd\n f.comments['HJD'] = \"Heliocentric Julian Day at exposure midpoint\"", "def __call__(cls, nir_paw_image_fname, nir_paw_conf_fname, output_template, conf_limit):\n\n# on with the show\n logger.info('Opening science and confidence frames')\n ifits=fitsio.FITS(nir_paw_image_fname,'r')\n cfits=fitsio.FITS(nir_paw_conf_fname,'r')\n\n#\n# Check that the number of HDUs match\n#\n\n if (len(ifits) != len(cfits)):\n print(\"Number of HDUs/extensions in IMAGE and CONFidence files do not match.\")\n print(\"Aborting\")\n exit(1)\n\n p_ih=ifits[0].read_header()\n p_ch=cfits[0].read_header()\n# Remove reserve keywords\n p_ih.clean()\n\n#\n# Extract some keywords from PRIMARY header to propagate into the individual images.\n#\n base_dict={}\n base_header=[]\n for hkeep in nci.nir_paw_primary_keep:\n if (hkeep in p_ih):\n base_header.append({'name':hkeep,'value':p_ih[hkeep],'comment':p_ih.get_comment(hkeep)})\n base_dict[hkeep]={'value':p_ih[hkeep],'comment':p_ih.get_comment(hkeep)}\n else:\n print(\"Keyword {:s} missing in 
HDU[{:d}]\".format(hkeep,0))\n#\n# If possible, need too keep track of REQTIME (requested frametime) because sometimes \n# EXPTIME seems to be mispopulated in the CCD image HDUs with TEXPTIME\n#\n if ('TEXPTIME' in p_ih):\n texptime=p_ih['TEXPTIME']\n else:\n texptime=None\n if ('REQTIME' in p_ih):\n reqtime=p_ih['REQTIME']\n else:\n reqtime=None\n#\n# print(base_header)\n \n\n#\n# Step through HDUs... and form \"CCD\" images for each HDU\n#\n ExtList=[]\n for hnum in range(1,len(ifits)):\n print(\"############ Begin work on extnum={:d} ###############\".format(hnum))\n\n# Check that extensions match (after that concentrate on image).\n print(hnum,ifits[hnum].get_extname(),cfits[hnum].get_extname())\n if (ifits[hnum].get_extname() != cfits[hnum].get_extname()):\n print(\"Working on extension {:d}. Extension names (image,conf) of ([{:s}],[{:s}]) do not match!\".format(\n hnum,ifits[hnum].get_extname(),cfits[hnum].get_extname()))\n print(\"Aborting!\")\n exit(1)\n\n f_ih=ifits[hnum].read_header()\n f_ih.clean()\n#\n# Fix occurences where the CCD-level keyword EXPTIME has inherited the value of TEXPTIME\n#\n exptime=f_ih['EXPTIME']\n if (reqtime is not None):\n if (exptime > reqtime):\n print(\"Warning: possible corrupt EXPTIME (total exptime rather than frame time present).\")\n print(\"Attempting to update EXTIME to REQTIME (requested frame time).\")\n print(\" Primary HDU: TEXPTIME: {:}\".format(texptime))\n print(\" Primary HDU: REQTIME: {:}\".format(reqtime))\n print(\" Current HDU: EXPTIME: {:} --> {:}\".format(exptime,reqtime))\n exptime=reqtime\n f_ih['EXPTIME']=reqtime\n#\n# Augment keywords pulled from primary header with keywords from current HDU\n#\n c_header=base_header[:]\n c_dict=dict(base_dict)\n for hkeep in nci.nir_paw_hdu_keep:\n if (hkeep in f_ih):\n# print(hkeep,f_ih[hkeep],f_ih.get_comment(hkeep))\n c_header.append({'name':hkeep,'value':f_ih[hkeep],'comment':f_ih.get_comment(hkeep)})\n if (hkeep in c_dict):\n print(\"Warning: Replacing keyword {:s} with value from hdu={:d}\".format(hkeep,hnum))\n c_dict[hkeep]={'value':f_ih[hkeep],'comment':f_ih.get_comment(hkeep)}\n else:\n print(\"Keyword {:s} missing in HDU[{:d}]\".format(hkeep,hnum))\n\n#\n# Get the CCDNUM from special keyword and propagate\n# Get SKYLEVEL, SKYNOISE, ZEROPOINT and form basis value for the weight plane\n#\n ccdnum=f_ih['HIERARCH ESO DET CHIP NO']\n c_header.append({'name':'CCDNUM','value':ccdnum,'comment':'Unique Detector Number'})\n\n# exptime=f_ih['EXPTIME']\n## Fix occurences where the CCD-level keyword EXPTIME has inherited the value of TEXPTIME\n# if (exptime > reqtime):\n# print(\"Warning: possible corrupt EXPTIME (total exptime rather than frame time present).\")\n# print(\"Attempting to update EXTIME to REQTIME (requested frame time).\")\n# print(\" Primary HDU: TEXPTIME: {:.2f}\".format(texptime))\n# print(\" Primary HDU: REQTIME: {:.2f}\".format(reqtime))\n# print(\" Current HDU: EXPTIME: {:.2f} --> {:.2f}\".format(exptime,reqtime))\n# exptime=reqtime\n# f_ih['EXPTIME']=reqtime\n\n mtime=2.5*np.log10(exptime)\n skylev=f_ih['SKYLEVEL']\n skyrms=f_ih['SKYNOISE']\n seeing=f_ih['SEEING']\n magzpt=f_ih['MAGZPT']\n\n# zeropoint include a correction from VEGA->AB\n# zeropoint in headers was found to have a factor for EXPTIME removed (have to add back in for DES-like processing)\n\n if (p_ih['BAND'] in nci.nir_vega_to_ab):\n magzpt=magzpt+nci.nir_vega_to_ab[p_ih['BAND']]+mtime\n else:\n print(\"Warning! 
Unknown BAND ({:s}) for conversion of zeropoint from VEGA to AB system\".format(p_ih['BAND']))\n\n c_header.append({'name':'SKYBRITE', 'value':skylev, 'comment':'Sky level estimate from IMCORE'})\n c_header.append({'name':'SKYSIGMA', 'value':skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'SKYVARA', 'value':skyrms*skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'SKYVARB', 'value':skyrms*skyrms, 'comment':'Sky noise estimate from IMCORE'})\n c_header.append({'name':'FWHM', 'value':seeing, 'comment':'Average FWHM (pixels)'})\n c_header.append({'name':'MAG_ZERO', 'value':magzpt, 'comment':'Converted MAGZPT(Vega) to AB system'})\n nite_val=convert_utc_str_to_nite(f_ih['DATE-OBS'])\n c_header.append({'name':'NITE', 'value':nite_val, 'comment':'Observation Nite'})\n c_header.append({'name':'SATURATE', 'value':nci.nircam_satval[ccdnum], 'comment': 'Saturation Level (ADU)'})\n c_header.append({'name':'PIXSCAL1', 'value':0.341, 'comment': 'Fiducial pixel scale (arcsec/pix)'})\n c_header.append({'name':'PIXSCAL2', 'value':0.341, 'comment': 'Fiducial pixel scale (arcsec/pix)'})\n\n# bval=f_ih['BSCALE']\n# print(\"BSCALE was: \",bval)\n print(\"SKYLEVEL was: \",skylev)\n print(\"SKYRMS was: \",skyrms)\n#\n# Searching for a proper WGT prescription\n#\n# This was what I took to be equivalent to DES (but perhaps it does not properly factor in N-image stack\n# wgtval=skylev+(skyrms*skyrms)\n print(\"SKYLEV + (SKYRMS*SKYRMS): \",skylev+(skyrms*skyrms))\n#\n# This was assuming SKYLEVEL does not properly inform stats\n# wgtval=(skyrms*skyrms)\n print(\"(SKYRMS*SKYRMS): \",skyrms*skyrms)\n\n#\n# Read the image data from the science and confidence files.\n#\n sci_data=ifits[hnum].read()\n print(\"Median of data {:.3f} \".format(np.median(sci_data)))\n conf_data=cfits[hnum].read()\n\n#\n# Better seemed to be a re-measurement of STD\n#\n print(\"Attempting an improved SKYRMS with 3-sigma clip to remove objects\")\n avgval, medval, stdval = medclip(sci_data,verbose=3)\n# print(avgval,medval,stdval)\n print(\"stdval^2: \",stdval*stdval)\n wgtval=(stdval*stdval)\n# print(wgtval)\n#\n# Use the new (i.e. 
chip-based header) to feed a WCS \n# Use image size to feed calculations for center and corners (similar to despyastro.CCD_corners\n#\n print(\"Calculating center/corners assuuming native ZPN projection\")\n w=WCS(fitsio.FITSHDR(c_header))\n\n fnax2=float(sci_data.shape[0])\n fnax1=float(sci_data.shape[1])\n corn_x=np.array([fnax1/2.0,1.,fnax1,fnax1,1.])\n corn_y=np.array([fnax2/2.0,1.,1.,fnax2,fnax2])\n sky = w.pixel_to_world(corn_x,corn_y)\n corn_ra=sky.ra.degree\n corn_dec=sky.dec.degree\n\n c_header.append({'name':'RA_CENT', 'value':corn_ra[0], 'comment':'RA center'})\n c_header.append({'name':'DEC_CENT','value':corn_dec[0],'comment':'DEC center'})\n for i in range(1,5):\n c_header.append({'name':'RAC{:d}'.format(i), 'value':corn_ra[i], 'comment':'RA corner {:d}'.format(i)})\n c_header.append({'name':'DECC{:d}'.format(i),'value':corn_dec[i],'comment':'DEC corner {:d}'.format(i)})\n RACMIN, RACMAX, DECCMIN, DECCMAX, CROSSRA0 = get_DESDM_corners_extent(corn_ra, corn_dec)\n c_header.append({'name':'RACMIN', 'value':RACMIN, 'comment':'Minimum extent of image in RA'})\n c_header.append({'name':'RACMAX', 'value':RACMAX, 'comment':'Maximum extent of image in RA'})\n c_header.append({'name':'DECCMIN', 'value':DECCMIN, 'comment':'Minimum extent of image in Declination'})\n c_header.append({'name':'DECCMAX', 'value':DECCMAX, 'comment':'Maximum extent of image in Declination'})\n c_header.append({'name':'CROSSRA0','value':CROSSRA0,'comment':'Does Image Span RA 0h (Y/N)'})\n c_header.append({'name':'DESEPOCH','value':'NIREPOCH','comment':'Default DES epoch definition for including NIR data'})\n#\n#\n#\n print(\"Stripping ZPN projection from WCS and creating a shift to get a rough TAN\")\n recs_to_delete=[] \n for i, hrec in enumerate(c_header):\n if (hrec['name'] == 'CTYPE1'):\n c_header[i]['value']='RA---TAN'\n if (hrec['name'] == 'CTYPE2'):\n c_header[i]['value']='DEC--TAN'\n\n if (hrec['name'] == 'CRVAL1'):\n c_header[i]['value']=corn_ra[0]\n if (hrec['name'] == 'CRVAL2'):\n c_header[i]['value']=corn_dec[0]\n if (hrec['name'] == 'CRPIX1'):\n c_header[i]['value']=fnax1/2.0\n if (hrec['name'] == 'CRPIX2'):\n c_header[i]['value']=fnax2/2.0\n\n if (hrec['name'] in ['PV2_1','PV2_2','PV2_3','PV2_4','PV2_5']):\n recs_to_delete.append(i)\n if (len(recs_to_delete) > 0):\n for i in sorted(recs_to_delete,reverse=True):\n x=c_header.pop(i)\n print(\"Removing: {:}\".format(x))\n\n whack=WCS(fitsio.FITSHDR(c_header))\n skyhack = whack.pixel_to_world(corn_x,corn_y)\n whack_corn_ra=skyhack.ra.degree\n whack_corn_dec=skyhack.dec.degree\n for i in range(5):\n cosdec=np.cos(corn_dec[i]*np.pi/180.)\n dra=3600.*(corn_ra[i]-whack_corn_ra[i])*cosdec\n ddec=3600.*(corn_dec[i]-whack_corn_dec[i])\n print(\" WCS shift {:d} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} \".format(ccdnum,corn_ra[i],corn_dec[i],whack_corn_ra[i],whack_corn_dec[i],dra,ddec))\n\n# for i, hrec in enumerate(c_header):\n# print(i,hrec)\n\n#\n# Form the SCI, MSK, and WGT HDUs\n#\n im=DESImage(init_data=True,init_mask=True,init_weight=True,shape=sci_data.shape)\n\n im.data=np.float32(sci_data)\n msk_wsm=np.where(conf_data<conf_limit)\n im.mask[msk_wsm] |= BADPIX_BPM\n im.weight=np.float32(conf_data/100./wgtval)\n#\n# Check for extra conditions where further masking is needed\n# Here is where CCD=6 check was started (now removed and placed \n# in nir_starmask to take advantage of bright object masking\n#\n\n\n#\n# Deal with individual header-isms and write out SCI, MSK, WGT\n# Note this is using fitsio (duplicating some of the DESIMAGE.save \n# but 
customization was needed to deal with foibles of the current\n#\n fname=re.sub('%02d','{:02d}'.format(ccdnum),output_template,1)\n ofits = fitsio.FITS(fname, 'rw', clobber=True)\n\n im.header=fitsio.FITSHDR(c_header) \n im.header['DES_EXT']='IMAGE'\n im.header = update_hdr_compression(im.header, 'SCI')\n ofits.write(im.data,extname='SCI',header=im.header)\n\n\n im.mask_hdr=fitsio.FITSHDR(c_header) \n im.mask_hdr['DES_EXT']='MASK'\n im.mask_hdr = update_hdr_compression(im.mask_hdr, 'MSK')\n im.mask_hdr['DES_EXT']='MASK'\n ofits.write(im.mask,extname='MSK',header=im.mask_hdr)\n\n# im.weight_hdr=fitsio.FITSHDR(c_header) \n# print(im.weight_hdr)\n im.weight_hdr = update_hdr_compression(im.weight_hdr, 'WGT')\n# print(im.weight_hdr)\n im.weight_hdr['DES_EXT']='WEIGHT'\n ofits.write(im.weight,extname='WGT',header=im.weight_hdr)\n\n ofits.close()\n print(\"Wrote {:s}\".format(fname))\n print(\" \")\n \n\n ifits.close()\n cfits.close()\n\n ret_code = 0\n return ret_code", "def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out", "def processCerFile(k, fb, newcorex=None, newcorey=None, sz=0):\n\n #---- Read Event Header\n evth = unpack('{}f'.format(evthSize), fb.read(evthSize * wordSize))\n #print(evth)\n\n primary = get_primary(evth)\n energy = get_energy(evth)\n height = get_height_first(evth)\n thetaEvtH, phiEvtH = get_direction(evth)\n coreX, coreY, coreD = get_core(evth)\n\n print('{:4d} {:3d} {:9d} {:6.1f} {:8.1f} {:7.1f} {:7.1f} {:8.1f} {:5.1f} {:5.1f}'\n .format(k, int(primary), sz, energy, height, coreX, coreY, coreD, thetaEvtH, phiEvtH))\n\n return\n\n #---- Read Cherenkov photons from file\n\n wl = 999.\n i = 0\n\n while wl > 0.5:\n cphotonData = fb.read(cphotonSize * wordSize)\n \n i = i + 1\n wl, x, y, u, v, t, h = unpack('{}f'.format(cphotonSize), cphotonData)\n w = sqrt(1.0 - u ** 2 - v ** 2)\n \n if wl < 1.:\n continue\n\n wl = wl - 101000.\n\n print('{} {} {:.2f} {:.2f} {:.2f} {:.6f} {:.6f} {:.6f} {:.8f} {:.2f}'\n .format(k, i, wl, x, y, u, v, w, t, h))", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 
'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n 
headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= 
ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('CDELT3', phi_array[1]-phi_array[0])\n fhdr.set('CTYPE3', 'FARDEPTH')\n fhdr.set('CUNIT3', 'RAD/M^2')\n return fhdr", "def eff_param():\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = 
(data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n 
image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def SetMetadata(IMAGE,METADATA):\n IMAGE.SetSpacing(METADATA[0])\n IMAGE.SetOrigin(METADATA[1])\n IMAGE.SetDirection(METADATA[2])", "def set_field(self,Hext):\n self.raw_parameters[\"Hext\"] = Hext\n self.parameters = NormalizedParameters(self.raw_parameters)\n self._load()", "def __init__(self, encut, name=\"scf_settings\"):\n InputParameters.__init__(self, name=name)\n self.update_electronic_settings(\"ENCUT\", encut)", "def read_kitti_label(file, p2, use_3d_for_2d=False):\n\n gts = []\n\n text_file = open(file, 'r')\n\n '''\n Values Name Description\n ----------------------------------------------------------------------------\n 1 type Describes the type of object: 'Car', 'Van', 'Truck',\n 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',\n 'Misc' or 'DontCare'\n 1 truncated Float from 0 (non-truncated) to 1 (truncated), where\n truncated refers to the object leaving image boundaries\n 1 occluded Integer (0,1,2,3) indicating occlusion state:\n 0 = fully visible, 1 = partly occluded\n 2 = largely occluded, 3 = unknown\n 1 alpha Observation angle of object, ranging [-pi..pi]\n 4 bbox 2D bounding box of object in the image (0-based index):\n contains left, top, right, bottom pixel coordinates\n 3 dimensions 3D object dimensions: height, width, length (in meters)\n 3 location 3D object location x,y,z in camera coordinates (in meters)\n 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]\n 1 score Only for results: Float, indicating confidence in\n detection, needed for p/r curves, higher is better.\n '''\n\n pattern = re.compile(('([a-zA-Z\\-\\?\\_]+)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+'\n + '(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s+(fpat)\\s*((fpat)?)\\n')\n .replace('fpat', '[-+]?\\d*\\.\\d+|[-+]?\\d+'))\n\n for line in text_file:\n\n parsed = pattern.fullmatch(line)\n\n # bbGt annotation in text format of:\n # cls x y w h occ x y w h ign ang\n if parsed is not None:\n\n obj = edict()\n\n ign = False\n\n cls = parsed.group(1) # type\n trunc = float(parsed.group(2))\n occ = float(parsed.group(3))\n alpha = float(parsed.group(4))\n\n x = float(parsed.group(5)) # left\n y = float(parsed.group(6)) # top\n x2 = float(parsed.group(7)) # right\n y2 = float(parsed.group(8)) # bottom\n\n width = x2 - x + 1\n height = y2 - y + 1\n\n h3d = float(parsed.group(9))\n w3d = float(parsed.group(10))\n l3d = float(parsed.group(11))\n\n cx3d = float(parsed.group(12)) # center of car in 3d\n cy3d = float(parsed.group(13)) # bottom of car in 3d\n cz3d = float(parsed.group(14)) # center of car in 3d\n rotY = float(parsed.group(15))\n\n # actually center the box\n cy3d -= (h3d / 2)\n\n elevation = (1.65 - cy3d) # height above sea level\n\n if use_3d_for_2d and h3d > 0 and w3d > 0 and l3d > 0:\n\n # re-compute the 2D box using 3D (finally, avoids clipped boxes)\n verts3d, corners_3d = project_3d(p2, cx3d, cy3d, cz3d, w3d, h3d, l3d, rotY, return_3d=True)\n\n # any boxes behind camera plane?\n if np.any(corners_3d[2, :] <= 0):\n ign = True\n\n else: # 3d for 2d\n x = min(verts3d[:, 0])\n y = min(verts3d[:, 1])\n x2 = max(verts3d[:, 0])\n y2 = max(verts3d[:, 1])\n\n width = x2 - x + 1\n height = y2 - y + 1\n\n else:\n verts3d, corners_3d = np.zeros((8, 2)), np.zeros((3, 8))\n\n # project cx, cy, cz\n coord3d = p2.dot(np.array([cx3d, cy3d, cz3d, 
1]))\n\n # store the projected instead\n cx3d_2d = coord3d[0]\n cy3d_2d = coord3d[1]\n cz3d_2d = coord3d[2] # TODO: depth?\n\n # 3d center to 2d, image coordinate\n cx = cx3d_2d / cz3d_2d\n cy = cy3d_2d / cz3d_2d\n\n # encode occlusion with range estimation\n # 0 = fully visible, 1 = partly occluded\n # 2 = largely occluded, 3 = unknown\n if occ == 0:\n vis = 1\n elif occ == 1:\n vis = 0.66\n elif occ == 2:\n vis = 0.33\n else:\n vis = 0.0\n\n while rotY > math.pi: rotY -= math.pi * 2\n while rotY < (-math.pi): rotY += math.pi * 2\n\n # recompute alpha\n alpha = convertRot2Alpha(rotY, cz3d, cx3d) # TODO: why don't use alpha in Kitti directly?\n\n obj.elevation = elevation\n obj.cls = cls\n obj.occ = occ > 0\n obj.ign = ign\n obj.visibility = vis\n obj.trunc = trunc\n obj.alpha = alpha\n obj.rotY = rotY\n\n # is there an extra field? (assume to be track)\n if len(parsed.groups()) >= 16 and parsed.group(16).isdigit(): obj.track = int(parsed.group(16))\n\n obj.bbox_full = np.array([x, y, width, height])\n obj.bbox_3d = [cx, cy, cz3d_2d, w3d, h3d, l3d, alpha, cx3d, cy3d, cz3d, rotY]\n # 2d center, depth, 3d shape, alpha, 3d center, rY\n obj.center_3d = [cx3d, cy3d, cz3d]\n # print(verts3d[:8], corners_3d)\n # 8 * 2 x, y\n # [[716.2700834 144.0556177]\n # [820.29305993 144.00207322]\n # [820.29305993 307.58688203]\n # [808.68674867 300.53454034]\n # [808.68674867 146.02789809]\n # [710.44462716 146.07566844]\n # [710.44462716 300.36824124]\n # [716.2700834 307.40048192]]\n\n # 3 * 8, x, y (height), z (depth)\n # [[1.23763004 2.43757004 2.43757004 2.44236996 2.44236996 1.24242996 1.24242996 1.23763004]\n # [-0.42 -0.42 1.47 1.47 -0.42 -0.42 1.47 1.47]\n # [8.1760119 8.1640121 8.1640121 8.6439881 8.6439881 8.6559879 8.6559879 8.1760119]]\n obj.vertices = verts3d[:8].T.flatten()\n obj.corners_3d = corners_3d.flatten()\n\n gts.append(obj)\n\n text_file.close()\n\n return gts", "def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def convert2EbnerParamOriginalParam(listSlice,list_prefix,directory,paramAx,paramCor,paramSag):\n paramAx=np.load(paramAx)\n paramCor=np.load(paramCor)\n paramSag=np.load(paramSag)\n param=[]\n param.append(paramAx)\n param.append(paramCor)\n param.append(paramSag)\n \n images,mask = createVolumesFromAlist(listSlice.copy()) #list of images corresponding to differents original stacks\n \n \n mat = 
np.array([[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]]) #matrix to convert affine matrix from nibabel to itk\n\n for n in range(len(images)): #for each stack\n \n imagen = images[n]\n \n for i_slice in range(len(images[n])): #for each slices (in each stacks)\n \n slicei=imagen[i_slice]\n dimension=3\n X,Y,Z= slicei.get_slice().get_fdata().shape\n transfo = param[n][slicei.get_index_slice(),:,:]\n #print()\n matrix = mat @ transfo @ mat\n #print(matrix)\n test = sitk.AffineTransform(dimension)\n test.SetMatrix(matrix[0:3,0:3].flatten())\n test.SetTranslation(matrix[0:3,3])\n images_index = slicei.get_index_image()\n\n sitk.WriteTransform(test,\"%s/%s_slice%d.tfm\" %(directory,list_prefix[images_index],slicei.get_index_slice())) #save rigid transformation, computed at the barycenter of the image, adatpted to itk", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, buck=None, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n if buck is None:\n self.buck_pms = []\n else:\n self.buck_pms = [] # TODO:\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh,\n self.eps_ult, *self.buck_pms, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. 
ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n 
h['PHOTPLAM'] = 1.\n \n return h, wcs", "def __init__(self, osi, fy, fu, es, esh, eps_sh, eps_ult, cf, alpha_2, cd):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.es = float(es)\n self.esh = float(esh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.cf = float(cf)\n self.alpha_2 = alpha_2\n self.cd = float(cd)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.es, self.esh, self.eps_sh, self.eps_ult, '-CMFatigue', self.cf, self.alpha_2, self.cd]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self, osi, fy, fu, e_mod, e_mod_sh, eps_sh, eps_ult, lsr, alpha=1.0, cm_fatigue=None, mp_curve=None):\n self.osi = osi\n self.fy = float(fy)\n self.fu = float(fu)\n self.e_mod = float(e_mod)\n self.e_mod_sh = float(e_mod_sh)\n self.eps_sh = float(eps_sh)\n self.eps_ult = float(eps_ult)\n self.lsr = float(lsr)\n self.alpha = float(alpha)\n if cm_fatigue is None:\n self.cm_fatigue = None\n self.cm_params = []\n else:\n self.cm_fatigue = cm_fatigue\n self.cm_params = ['-CMFatigue', cm_fatigue['cf'], cm_fatigue['alpha'], cm_fatigue['cd']]\n if mp_curve is None:\n self.mp_curve = None\n self.mp_params = []\n else:\n self.mp_curve = mp_curve\n r1 = self.mp_curve.setdefault('r1', 0.333)\n r2 = self.mp_curve.setdefault('r2', 18)\n r3 = self.mp_curve.setdefault('r3', 4)\n self.mp_params = ['-MPCurveParams', r1, r2, r3]\n\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.fu, self.e_mod, self.e_mod_sh, self.eps_sh, self.eps_ult, '-DMBuck', self.lsr, self.alpha, *self.cm_params, *self.mp_params]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\"}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def load_charmm_ff_params(fname):\n with open(fname) as f:\n lines = f.readlines()\n\n comment_stripper = re.compile(r'[!\\*].*')\n ffp = ForceFieldParams(fname)\n\n current_section = None\n for i in range(len(lines)):\n # Ignore comments and blank lines\n line = comment_stripper.sub('', lines[i].strip())\n if line == '': continue\n\n tokens = line.split()\n skip_line = False\n for section in ('ATOM', 'BOND', 'ANGL', 'DIHE', 'IMPR', 'NONB', 'CMAP'):\n if tokens[0].startswith(section):\n current_section = section\n skip_line = True\n break\n\n if skip_line: continue\n\n if current_section is 'BOND':\n key1, key2 = key_names((tokens[0], tokens[1]))\n ffp.bonds[key1] = ffp.bonds[key2] = {\n 'force_constant': float(tokens[2]),\n 'equilibrium_distance': float(tokens[3])\n }\n elif current_section is 'ANGL':\n # TODO: Urey-Bradley terms\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2]))\n ffp.angles[key1] = ffp.angles[key2] = {\n 'force_constant': float(tokens[3]),\n 'equilibrium_angle': float(tokens[4]) * pi / 180.0\n }\n elif current_section is 'DIHE':\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n ffp.dihedrals[key1] = ffp.dihedrals[key2] = {\n 'force_constant': 
float(tokens[4]),\n 'multiplicity': float(tokens[5]),\n 'delta': float(tokens[6])\n }\n elif current_section is 'IMPR':\n key = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n else:\n # Unknown line type\n continue\n return ffp", "def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs", "def set_header( name, value ):", "def read_param_phil(self):\n\n # LABELIT target file settings\n if self.target_phil is None:\n self.write_default_phil()\n self.phil.ctr.SetValue(self.target_phil)\n\n # Resolution limits\n # \"Try/except\" for backwards compatibility\n try:\n lowres = self.params.cctbx_ha14.resolution_limits.low\n hires = self.params.cctbx_ha14.resolution_limits.high\n self.res_limits.lowres.SetValue(str(lowres))\n self.res_limits.hires.SetValue(str(hires))\n except AttributeError:\n pass\n\n # Target options\n # \"Try/except\" for backwards compatibility\n try:\n t_uc = self.params.cctbx_ha14.target_unit_cell\n t_lat = self.params.cctbx_ha14.target_lattice_type\n l_idx = self.target_lattice.ctr.FindString(str(t_lat))\n t_ctype = self.params.cctbx_ha14.target_centering_type\n if t_ctype == 'P':\n c_idx = 1\n elif t_ctype == 'C':\n c_idx = 2\n elif t_ctype == 'I':\n c_idx = 3\n elif t_ctype == 'R':\n c_idx = 4\n elif t_ctype == 'F':\n c_idx = 5\n else:\n c_idx = 0\n if t_uc is not None:\n uc_str = [str(i) for i in t_uc.parameters()]\n self.target_uc.cell.SetValue(' '.join(uc_str))\n self.target_lattice.ctr.SetSelection(l_idx)\n self.target_centering.ctr.SetSelection(c_idx)\n except AttributeError:\n pass\n\n # Grid search options\n idx = self.gs_type.ctr.FindString(self.params.cctbx_ha14.grid_search.type)\n self.set_grid_search(idx=idx)\n self.signal_search.SetValue(self.params.cctbx_ha14.grid_search.sig_height_search)\n\n # # Selection options\n # self.select_only.SetValue(self.params.cctbx_ha14.selection.select_only.flag_on)\n # self.img_objects_path.Enable(self.select_only.GetValue())\n\n idx = self.select_by.ctr.FindString(self.params.cctbx_ha14.selection.select_by)\n self.select_by.ctr.SetSelection(idx)\n\n self.min_sigma.sigma.SetValue(str(self.params.cctbx_ha14.selection.min_sigma))\n\n # Selection filters\n if self.params.cctbx_ha14.selection.prefilter.flag_on:\n pg = self.params.cctbx_ha14.selection.prefilter.target_pointgroup\n ut = self.params.cctbx_ha14.selection.prefilter.target_uc_tolerance\n rs = self.params.cctbx_ha14.selection.prefilter.min_resolution\n rf = self.params.cctbx_ha14.selection.prefilter.min_reflections\n if self.params.cctbx_ha14.selection.prefilter.target_unit_cell is not None:\n try:\n uc = self.params.cctbx_ha14.selection.prefilter.target_unit_cell.parameters()\n except AttributeError:\n uc = None\n else:\n uc = None\n\n if str(pg).lower() != 'none':\n self.filt_lattice.toggle_boxes()\n self.filt_lattice.lattice.SetValue(str(pg))\n if str(uc).lower() != 'none':\n self.filt_uc.toggle_boxes()\n self.filt_uc.a.SetValue(str(uc[0]))\n self.filt_uc.b.SetValue(str(uc[1]))\n self.filt_uc.c.SetValue(str(uc[2]))\n self.filt_uc.alpha.SetValue(str(uc[3]))\n self.filt_uc.beta.SetValue(str(uc[4]))\n self.filt_uc.gamma.SetValue(str(uc[5]))\n 
self.filt_uc.tolerance.SetValue(str(ut))\n if str(rs).lower() != 'none':\n self.filt_res.toggle_boxes()\n self.filt_res.res.SetValue(str(rs))\n if str(rf).lower() != 'none':\n self.filt_ref.toggle_boxes()\n self.filt_ref.ref.SetValue(str(rf))" ]
[ "0.6119822", "0.52715033", "0.52555937", "0.521904", "0.5153852", "0.5145439", "0.50969875", "0.5046172", "0.5035334", "0.5006989", "0.49636608", "0.49189067", "0.49184573", "0.4891116", "0.4880313", "0.4876546", "0.48572096", "0.4852319", "0.48459044", "0.4828242", "0.48211944", "0.4816301", "0.4812694", "0.4803674", "0.47828144", "0.4772682", "0.4767819", "0.47572675", "0.4753877", "0.47485572" ]
0.60989493
1
Find all occurrences of val in list lo. Returns a list of indices of val in lo.
def findall(lo,val): u = [] i = -1 while( i < len(lo)-1): try: i = lo.index(val,i+1) u.append(i) except: i += 1 return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. \n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def all_indices(haystack, needle):\n index = 0\n indices = list()\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n indices.append(i)\n index = i+1\n return indices", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def indexof(self, value, tag=WORD):\n match = lambda a, b: a.endswith(\"*\") and b.startswith(a[:-1]) or a==b\n indices = []\n for i in range(len(self.words)):\n if match(value, unicode(self.get(i, tag))):\n indices.append(i)\n return indices", "def indexAll(inputList=None, value=None):\r\n if not isinstance(inputList, list):\r\n raise TypeError('Input list must be a list object.')\r\n return [i for i, x in enumerate(inputList) if x == value]", "def find_index(vec_vals,target):\n target=np.atleast_1d(target) #turn scalar into iterable, no op if already array\n vec_vals=np.array(vec_vals)\n index_list=[]\n for item in target:\n first_index=np.argmin(np.abs(vec_vals - item))\n index_list.append(first_index)\n return index_list", "def findIndices(g):\r\n change = [0]\r\n seen = [g[0]]\r\n for i in range(1, len(g)):\r\n if not g[i] in seen:\r\n change.append(i)\r\n seen.append(g[i])\r\n return change", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1", "def coord_indices_of(self, v_list):\n return [self.coord_index_of(v) for v in v_list]", "def find_at(self, x, y):\n return list(self.ifind_at(x, y))", "def lc_index(*args):\n index = []\n x = check_lc_data(args[0])\n i = 0\n for line in args[0].Data.LCData.lc_data:\n i += 1\n if line != x[i - 1]:\n 
index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def int_to_indices(value: int, length: int, radix_bits: int) -> Iterable[int]:\n mask = (1 << radix_bits) - 1\n return ((value >> (i * radix_bits)) & mask for i in reversed(range(length)))", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def positions(self, searchstr: str):\n indices = []\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index >= 0:\n indices.append(index)\n return indices", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def index(self,*val):\n if len(val): self._value = self.allele_set[val[0]]\n return self.allele_set.index(self.value())", "def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2", "def return_indices(nums, target):\n indices = []\n i = 0\n number_found = False\n while not number_found:\n my_target = nums[i]\n \n for j in range(i+1,len(nums)):\n my_target += nums[j]\n if my_target == target:\n number_found = True\n indices = [i, j]\n break\n my_target = nums[i]\n \n i+=1\n return indices" ]
[ "0.70793176", "0.7071109", "0.66156113", "0.6319743", "0.6250533", "0.620535", "0.62024206", "0.619781", "0.6169729", "0.60985184", "0.6077737", "0.6067603", "0.5907215", "0.5842827", "0.58379203", "0.5817054", "0.58101517", "0.5786621", "0.577902", "0.5734879", "0.5654707", "0.5616727", "0.55868834", "0.5581693", "0.5579649", "0.5515418", "0.5482658", "0.5481943", "0.54432505", "0.54349846" ]
0.8087934
0
Find overall 3D rotation (phi theta psi) between two sets of Eulerian angles. The two sets have to be of the same length, and it is assumed that the k'th element of the first list corresponds to the k'th element of the second list.
def rotation_between_anglesets(agls1, agls2): from math import sin, cos, pi, sqrt, atan2, acos, atan from numpy import array, linalg, matrix import types deg2rad = pi/180.0 def ori2xyz(ori): if(type(ori) == types.ListType): phi, theta, psi = ori[:3] else: # it has to be Transformation object d = ori.get_params("spider") phi = d["phi"] theta = d["theta"] psi = d["psi"] """ # This makes no sense here! PAP 09/2011 if theta > 90.0: phi += 180.0 theta = 180.0-theta """ phi *= deg2rad theta *= deg2rad x = sin(theta) * sin(phi) y = sin(theta) * cos(phi) z = cos(theta) return [x, y, z] N = len(agls1) if N != len(agls2): print 'Both lists must have the same length' return -1 if N < 2: print 'At least two orientations are required in each list' return -1 U1, U2 = [], [] for n in xrange(N): p1 = ori2xyz(agls1[n]) p2 = ori2xyz(agls2[n]) U1.append(p1) U2.append(p2) # compute all Suv with uv = {xx, xy, xz, yx, ..., zz} Suv = [0] * 9 c = 0 nbori = len(U1) for i in xrange(3): for j in xrange(3): for s in xrange(nbori): Suv[c] += (U2[s][i] * U1[s][j]) c += 1 # create matrix N N = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]], [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]], [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]], [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]]) # eigenvector corresponding to the most positive eigenvalue val, vec = linalg.eig(N) q0, qx, qy, qz = vec[:, val.argmax()] # create quaternion Rot matrix r = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0, 2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0, 2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0] R = Transform(r) dictR = R.get_rotation('SPIDER') return dictR['phi'], dictR['theta'], dictR['psi']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def rotation_elements(self, eta, phi, theta):\n \n # Three-axis rotation:\n # 1. Rotate about +z by eta (follows RHR; rotation is mathematical and thus counter-clockwise)\n # 2. Tilt by phi with respect to +z (rotation about y-axis) then\n # 3. 
rotate by theta in-place (rotation about z-axis) ### BUG: This isn't a conceptual rotation about z (influenced by other rotations)\n \n\n eta = radians( eta ) # eta is orientation around the z axis (before reorientation)\n phi = radians( phi ) # phi is grain tilt (with respect to +z axis)\n theta = radians( theta ) # grain orientation (around the z axis)\n \n rotation_elements = [[ cos(eta)*cos(phi)*cos(theta)-sin(eta)*sin(theta) ,\n -cos(eta)*cos(phi)*sin(theta)-sin(eta)*cos(theta) ,\n -cos(eta)*sin(phi) ],\n [ sin(eta)*cos(phi)*cos(theta)+cos(eta)*sin(theta) ,\n -sin(eta)*cos(phi)*sin(theta)+cos(eta)*cos(theta) ,\n sin(eta)*sin(phi) ],\n [ -sin(phi)*cos(theta) ,\n sin(phi)*sin(theta) ,\n cos(phi) ]]\n \n return rotation_elements", "def euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, timestep):\r\n\r\n for i in range(sphere_positions.shape[0]):\r\n R0 = sphere_positions[i]\r\n O = (Oa_out[i][0] ** 2 + Oa_out[i][1] ** 2 + Oa_out[i][2] ** 2) ** 0.5\r\n\r\n ''' To rotate from basis (x,y,z) to (X,Y,Z), where x,y,z,X,Y,Z are unit vectors,\r\n you just need to multiply by the matrix\r\n ( X_x Y_x Z_x )\r\n ( X_y Y_y Z_y ),\r\n ( X_z Y_z Z_z )\r\n where X_x means the x-component of X.\r\n Our Z is Omega = o_spheres[i], so we need to make it into a complete basis.\r\n To do that we pick a unit vector different to Omega (either zhat or xhat depending on Omega)\r\n and use (Omega x zhat, Omega x (Omega x zhat), zhat) as our basis (X,Y,Z).\r\n That's it! [Only took me three days...]\r\n '''\r\n\r\n if np.array_equal(Oa_out[i], [0, 0, 0]):\r\n rot_matrix = np.identity(3)\r\n else:\r\n Otest = (abs(Oa_out[i] / O)).astype('float')\r\n perp1 = [0, 0, 1] if np.allclose(Otest, [1, 0, 0]) else [1, 0, 0]\r\n rot_matrix = np.array([np.cross(Oa_out[i], perp1) / O, np.cross(Oa_out[i], np.cross(Oa_out[i], perp1)) / O ** 2, Oa_out[i] / O]).transpose()\r\n\r\n for j in range(2):\r\n ''' rb0 is the position (\"r\") of the endpoint of the pointy rotation vector in the\r\n external (x,y,z) frame (\"b\") at the beginning of this process (\"0\") '''\r\n rb0 = sphere_rotations[i, j]\r\n\r\n ''' rbdashdash0_xyz is the position of the same endpoint in the frame of the rotating sphere (\"b''\"),\r\n\t\t\t\t\t\twhich we set to have the z-axis=Omega axis. It's in Cartesian coordinates. 
'''\r\n rbdashdash0_xyz = np.dot(linalg.inv(rot_matrix), (rb0 - R0))\r\n x0 = rbdashdash0_xyz[0]\r\n y0 = rbdashdash0_xyz[1]\r\n z0 = rbdashdash0_xyz[2]\r\n\r\n r0 = (x0 ** 2 + y0 ** 2 + z0 ** 2) ** 0.5\r\n t0 = np.arccos(z0 / r0)\r\n p0 = 0 if (x0 == 0 and y0 == 0) else np.arctan2(y0, x0)\r\n r = r0\r\n t = t0\r\n p = euler_timestep(p0, O, timestep)\r\n\r\n x = r * np.sin(t) * np.cos(p)\r\n y = r * np.sin(t) * np.sin(p)\r\n z = r * np.cos(t)\r\n rbdashdash_xyz = np.array([x, y, z])\r\n R = new_sphere_positions[i]\r\n rb = R + np.dot(rot_matrix, rbdashdash_xyz)\r\n new_sphere_rotations[i, j] = rb\r\n return new_sphere_rotations", "def check_angles(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicAngleForce, \"Error: forces must be HarmonicAngleForces\"\n\n n_angles0 = force0.getNumAngles()\n n_angles1 = force1.getNumAngles()\n\n dict0, dict1 = {}, {}\n\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(0)\n #unit_theta = theta0.unit\n unit_theta = u.degrees\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.degrees)**2\n\n for k in range(n_angles0):\n i0, i1, i2, theta0, k0 = force0.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict0[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n for k in range(n_angles1):\n i0, i1, i2, theta0, k0 = force1.getAngleParameters(k)\n if (k0 / k0.unit) != 0.0: # Skip forces with strength 0.0\n i0, i1, i2 = reorder_angles(i0, i1, i2)\n dict1[i0, i1, i2] = ((theta0 / unit_theta, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Angles0 - Angles1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Angles1 - Angles0 = %s\" % (keys1.difference(keys0)))\n diff_keys = keys0.symmetric_difference(keys1)\n assert diff_keys == set(), \"Systems have different HarmonicAngleForce entries: extra keys are: \\n%s\" % diff_keys\n\n for k, parameter_name in enumerate([\"theta0\", \"k0\"]):\n for (i0, i1, i2) in dict0.keys():\n val0 = dict0[i0, i1, i2][k]\n val1 = dict1[i0, i1, i2][k]\n if parameter_name=='theta0':\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has angle values of %f and %f degrees, respectively.\" % (i0, i1, i2, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Angle (%d, %d, %d) has force constant values of %f and %f kJ/(mol degree**2), respectively.\" % (i0, i1, i2, val0, val1)", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n sangle = np.sin(0.5*angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'zyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'zxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + 
sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'zxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'yxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'yzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'xyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'xzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n else:\n return False", "def solve_r3_rotation_for_angles_given_axes(\n R, e1, e2, e3, smaller_phi2_solution=True, return_both_solutions=False, deg=False\n):\n\n assert R.is_r3_rotation_matrix()\n e1 = matrix.col(e1).normalize()\n e2 = matrix.col(e2).normalize()\n e3 = matrix.col(e3).normalize()\n # Fail if e2 & e3 are parallel\n e2xe3 = e2.cross(e3)\n if e2xe3.length_sq() < 1.0e-6:\n return None\n # Make a unit test vector\n u = e2xe3.normalize()\n e1e2 = e1.dot(e2)\n e1e3 = e1.dot(e3)\n e2e3 = e2.dot(e3)\n e1e2e3 = e1.dot(e2.cross(e3))\n Re3 = R * e3\n e1Re3 = e1.dot(Re3)\n # ** Step 1 ** Calculation of phi2 (Bricogne equation (4))\n # e1.(R e3) = (e1.e2)(e2.e3) + {(e1.e3) - (e1.e2)(e2.e3)} cos(phi2)\n # + (e1.e2 x e3) sin(phi2)\n # The coefficients of cos & sin phi2\n cc = e1e3 - 
e1e2 * e2e3\n ss = e1e2e3\n # Fail if both are zero (indeterminate)\n if abs(cc) < 1.0e-6 and abs(ss) < 1.0e-6:\n return None\n norm = math.sqrt(cc * cc + ss * ss)\n rhs = (e1Re3 - e1e2 * e2e3) / norm\n # abs(rhs) should not be greater than 1.0, allowing a small tolerance\n if abs(rhs) > 1.000002:\n return None\n if rhs > 1.0:\n rhs = 1.0\n elif rhs < -1.0:\n rhs = -1.0\n cc /= norm\n ss /= norm\n # Solve rhs = cos(phi2) * cc + sin(phi2) * ss\n # using cos(a-b) = cos(a) cos(b) + sin(a) sin(b)\n # where b = phi2\n a = math.atan2(ss, cc)\n amb = math.acos(rhs)\n # Two solutions in range -pi to +pi\n # Note that if e1 == e3, ss = 0, a = 0 & phi2b = -phi2a\n phi2a = a - amb\n if phi2a > math.pi:\n phi2a -= 2.0 * math.pi\n elif phi2a < -math.pi:\n phi2a += 2.0 * math.pi\n phi2b = a + amb\n if phi2b > math.pi:\n phi2b -= 2.0 * math.pi\n elif phi2b < -math.pi:\n phi2b += 2.0 * math.pi\n if return_both_solutions:\n phi2_ = (phi2a, phi2b)\n elif smaller_phi2_solution:\n if abs(phi2a) < abs(phi2b):\n phi2_ = (phi2a,)\n else:\n phi2_ = (phi2b,)\n else:\n if abs(phi2a) > abs(phi2b):\n phi2_ = (phi2a,)\n else:\n phi2_ = (phi2b,)\n solutions = []\n for phi2 in phi2_:\n # ** Step 2 ** Calculation of phi1\n R2 = e2.axis_and_angle_as_r3_rotation_matrix(phi2, deg=False)\n R2inv = R2.transpose()\n v = R2 * e3\n w = Re3\n v1 = v - (v.dot(e1)) * e1\n w1 = w - (w.dot(e1)) * e1\n norm = v1.dot(v1) * w1.dot(w1)\n # If norm = 0, rotations 1 & 3 are around same axis (for this phi2),\n # so any value for phi1 is OK\n if norm > 1.0e-8:\n norm = math.sqrt(norm)\n # cos(phi1) = (v1.w1)/norm\n # sin(phi1) = (v1.w1 x e1)/norm\n phi1 = math.atan2(v1.dot(w1.cross(e1)) / norm, v1.dot(w1) / norm)\n if phi1 > math.pi:\n phi1 -= 2.0 * math.pi\n if phi1 < -math.pi:\n phi1 += 2.0 * math.pi\n else:\n phi1 = 0.0\n # ** Step 3 ** Calculation of phi3\n R1inv = e1.axis_and_angle_as_r3_rotation_matrix(-1.0 * phi1, deg=False)\n R3 = R2inv * R1inv * R\n R3u = R3 * u\n # sin(phi3) = u.R3u x e3\n # cos(phi3) = u.R3u\n phi3 = math.atan2(u.dot(R3u.cross(e3)), u.dot(R3u))\n if deg:\n phi1, phi2, phi3 = tuple([x * 180 / math.pi for x in (phi1, phi2, phi3)])\n solutions.append((phi1, phi2, phi3))\n\n if return_both_solutions:\n return solutions\n else:\n return solutions[0]", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def angle_hkls(self, h1, h2):\n h1v = norm_vec((vec(*h1).T * self.Bmat)).T\n h2v = norm_vec((vec(*h2).T * self.Bmat)).T\n return np.around(np.arccos(h1v.T*h2v)[0, 0] * degrees, 3)", "def get_phi_kappa_omega(self, angles):\n (phi) = angles[0]\n (kappa) = angles[1]\n (omega) = angles[2]\n return (phi, kappa, omega)", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def rotate(self, ra1, dec1, ra2, dec2, ra3, dec3):\n # Turns Right Ascension/Declination into Azimuth/Zenith for healpy\n phi1 = ra1 - np.pi\n zen1 = np.pi/2. - dec1\n phi2 = ra2 - np.pi\n zen2 = np.pi/2. - dec2\n phi3 = ra3 - np.pi\n zen3 = np.pi/2. 
- dec3\n\n # Rotate each ra1 and dec1 towards the pole?\n x = np.array([hp.rotator.rotateDirection(\n hp.rotator.get_rotation_matrix((dp, -dz, 0.))[0], z, p)\n for z, p, dz, dp in zip(zen1, phi1, zen2, phi2)])\n\n # Rotate **all** these vectors towards ra3, dec3 (source_path)\n zen, phi = hp.rotator.rotateDirection(np.dot(\n hp.rotator.get_rotation_matrix((-phi3, 0, 0))[0],\n hp.rotator.get_rotation_matrix((0, zen3, 0.))[0]), x[:, 0], x[:, 1])\n\n dec = np.pi/2. - zen\n ra = phi + np.pi\n return np.atleast_1d(ra), np.atleast_1d(dec)", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def angles_points(a, b, c):\n u = subtract_vectors(b, a)\n v = subtract_vectors(c, a)\n return angles_vectors(u, v)", "def test_d_3():\n rs = 20\n d = 3\n np.random.seed(rs)\n number_rotations = 3\n\n theta_1 = np.random.uniform(0, 2 * math.pi)\n rotation_1 = np.identity(d)\n pos_1 = np.random.randint(0, d - 1)\n pos_2 = np.random.randint(pos_1 + 1, d)\n rotation_1[pos_1, pos_1] = math.cos(theta_1)\n rotation_1[pos_1, pos_2] = - math.sin(theta_1)\n rotation_1[pos_2, pos_1] = math.sin(theta_1)\n rotation_1[pos_2, pos_2] = math.cos(theta_1)\n\n theta_2 = np.random.uniform(0, 2 * math.pi)\n rotation_2 = np.identity(d)\n pos_3 = np.random.randint(0, d - 1)\n pos_4 = np.random.randint(pos_3 + 1, d)\n rotation_2[pos_3, pos_3] = math.cos(theta_2)\n rotation_2[pos_3, pos_4] = - math.sin(theta_2)\n rotation_2[pos_4, pos_3] = math.sin(theta_2)\n rotation_2[pos_4, pos_4] = math.cos(theta_2)\n\n theta_3 = np.random.uniform(0, 2 * math.pi)\n rotation_3 = np.identity(d)\n pos_5 = np.random.randint(0, d - 1)\n pos_6 = np.random.randint(pos_5 + 1, d)\n rotation_3[pos_5, pos_5] = math.cos(theta_3)\n rotation_3[pos_5, pos_6] = - math.sin(theta_3)\n rotation_3[pos_6, pos_5] = math.sin(theta_3)\n rotation_3[pos_6, pos_6] = math.cos(theta_3)\n\n final_rotation = rotation_1 @ rotation_2 @ rotation_3\n np.random.seed(rs)\n rotation_function = (mt_obj.calculate_rotation_matrix\n (d, number_rotations))\n assert(np.all(final_rotation == rotation_function))", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), 
np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def helical_consistency(p2i, p1):\n\tfrom pixel_error import angle_diff\n\tfrom math import cos,pi\n\tfrom utilities import getvec\n\tfrom pixel_error import angle_error\n\tfrom EMAN2 import Vec2f\n\tn =len(p1[0])\n\tprint n\n\tqtm = -1.0e10\n\tfor lf in xrange(0,181,180):\n\t\tp2 = []\n\t\tp2.extend(p2i)\n\t\tif( lf == 180):\n\t\t\ttflip = Transform({\"type\":\"spider\",\"theta\":180.0})\n\t\t\tfor j in xrange(n):\n\t\t\t\tt2 = Transform({\"type\":\"spider\",\"phi\":p2[0][j],\"theta\":p2[1][j],\"psi\":p2[2][j]})\n\t\t\t\tt2.set_trans( Vec2f( -p2[3][j], -p2[4][j] ) )\n\t\t\t\tt2 = t2*tflip\n\t\t\t\td = t2.get_params(\"spider\")\n\t\t\t\tp2[0][j] = d[\"phi\"]\n\t\t\t\tp2[1][j] = d[\"theta\"]\n\t\t\t\tp2[2][j] = d[\"psi\"]\n\t\t\t\tp2[3][j] = -d[\"tx\"]\n\t\t\t\tp2[4][j] = -d[\"ty\"]\n\t\ttt1 = [0.0]*n\n\t\ttt2 = [0.0]*n\n\t\tmirror = [False]*n\n\t\tln = 0\n\t\tfor j in xrange( n ):\n\t\t\tt1 = getvec(p1[0][j],p1[1][j])\n\t\t\tt2 = getvec(p2[0][j],p2[1][j])\n\t\t\ttm = getvec(180.0+p2[0][j],180.0-p2[1][j])\n\t\t\ttt1[j] = t1[0]*t2[0]+t1[1]*t2[1]+t1[2]*t2[2]\n\t\t\ttt2[j] = t1[0]*tm[0]+t1[1]*tm[1]+t1[2]*tm[2]\n\t\t\tif(abs(tt1[j])<1.0e-7): tt1[j] = 0.0\n\t\t\tif(abs(tt2[j])<1.0e-7): tt2[j] = 0.0\n\t\t\tif(tt1[j]>tt2[j]):\n\t\t\t\tmirror[j] = True\n\t\t\t\tln+=1\n\t\tprint \" FLIP \",lf\n\t\tif(ln < n//2):\n\t\t\tprint \"mirror \",ln\n\t\t\tfor j in xrange( n ):\n\t\t\t\tp2[0][j] += 180.0\n\t\t\t\tp2[1][j] = 180.0-p2[1][j]\n\t\t\t\tp2[2][j] = -p2[2][j]\n\t\t\t\tp2[4][j] = -p2[4][j]\n\t\t\t\tmirror[j] = not(mirror[j])\n\t\telse:\n\t\t\tprint \" straight\", ln\n\t\tphi1 = []\n\t\tphi2 = []\n\t\tagree = []\n\t\tfor j in xrange(n):\n\t\t\tif(mirror[j]):\n\t\t\t\tphi1.append(p1[0][j])\n\t\t\t\tphi2.append(p2[0][j])\n\t\t\t\tagree.append(j)\n\t\tprint len(phi1)\n\t\tdelta_phi = angle_diff( phi2, phi1 )\n\t\tprint \"close form diff===\", delta_phi\n\n\t\tphi1 = []\n\t\tphi2 = []\n\t\terrorm = []\n\t\tfor j in xrange( len( p1[0]) ):\n\t\t\tp2[0][j] = (p2[0][j] + delta_phi + 360)%360.0\n\t\t\tif(mirror[j]):\n\t\t\t\tphi1.append(p1[0][j])\n\t\t\t\tphi2.append(p2[0][j])\n\t\t\t\terrorm.append(angle_error( [ p2[0][j] ], [ p1[0][j] ]))\n\t\tqt = sum(errorm)/len(errorm)\n\t\tprint len(errorm),qt\n\t\tif(qt > qtm):\n\t\t\tqtm = qt\n\t\t\tp2o = []\n\t\t\tp2o.extend(p2)\n\t\t\terrormo = []\n\t\t\tphi1o = []\n\t\t\tphi2o = []\n\t\t\terrormo.extend(errorm)\n\t\t\tphi1o.extend(phi1)\n\t\t\tphi2o.extend(phi2)\n\t\t\n\treturn p2o, errormo, agree, delta_phi, phi1o, phi2o", "def rot_align(m, coeff, pairs):\n n_theta = 360\n p = pairs.shape[0]\n c = np.zeros((m + 1, p), dtype='complex128')\n m_list = np.arange(1, m + 1)\n\n max_iter = 100\n precision = 1e-10\n\n # Find initial points for Newton Raphson\n for i in range(m + 1):\n c[i] = np.einsum('ij, ij -> j', np.conj(coeff[i][:, pairs[:, 0]]), coeff[i][:, pairs[:, 1]])\n\n c2 = np.flipud(np.conj(c[1:]))\n b = (2 * m + 1) * np.real(common.icfft(np.concatenate((c2, c), axis=0)))\n rot = np.argmax(b, axis=0)\n rot = (rot - m) * n_theta / (2 * m + 1)\n\n # creating f' and f'' function\n m_list_ang_1j = 1j * m_list * np.pi / 180\n c_for_f_prime_1 = m_list_ang_1j * c[1:].T\n c_for_f_prime_2 = np.square(m_list_ang_1j) * c[1:].T\n\n def f_prime(x):\n return np.sum(np.real(c_for_f_prime_1 * 
np.exp(np.outer(x, m_list_ang_1j))), 1)\n\n def f_prime2(x):\n return np.sum(np.real(c_for_f_prime_2 * np.exp(np.outer(x, m_list_ang_1j))), 1)\n\n # Finding brackets, x1<x2 such that sign(f(x1)) != sign(f(x2)) and rot = (x1 + x2) / 2\n step_size = 0.5\n x1 = rot.copy()\n x2 = rot.copy()\n bad_indices = np.full(p, True)\n while np.any(bad_indices):\n x1[bad_indices] -= step_size\n x2[bad_indices] += step_size\n f_x1 = f_prime(x1)\n f_x2 = f_prime(x2)\n bad_indices = f_x1 * f_x2 > 0\n\n # Setting x1, x2 into x_low, x_high such that f(x_low)<f(x_high).\n x_low = x1.copy()\n x_high = x2.copy()\n f_x_low = f_prime(x_low)\n f_x_high = f_prime(x_high)\n x_high_is_low = f_x_high < f_x_low\n tmp = x_low.copy()\n tmp[x_high_is_low] = x_high[x_high_is_low]\n x_high[x_high_is_low] = x_low[x_high_is_low]\n x_low = tmp\n\n # Handling f(x) = 0 case\n f_x_low = f_prime(x_low)\n f_x_low_0 = f_x_low == 0\n x_high[f_x_low_0] = x_low[f_x_low_0]\n f_x_high = f_prime(x_high)\n f_x_high_0 = f_x_high == 0\n x_low[f_x_high_0] = x_high[f_x_high_0]\n\n rts = (x_low + x_high) / 2\n dx = np.abs(x_low - x_high)\n dx_old = dx.copy()\n f = f_prime(rts)\n df = f_prime2(rts)\n for _ in range(max_iter):\n bisect_indices = np.bitwise_or(((rts - x_high) * df - f) * ((rts - x_low) * df - f) > 0,\n np.abs(2 * f) > np.abs(dx_old * df))\n newton_indices = ~bisect_indices\n dx_old = dx.copy()\n\n # Handling out of range indices with Bisect step\n dx[bisect_indices] = (x_high[bisect_indices] - x_low[bisect_indices]) / 2\n rts[bisect_indices] = x_low[bisect_indices] + dx[bisect_indices]\n\n # Handling the rest with newton step\n dx[newton_indices] = f[newton_indices] / df[newton_indices]\n rts[newton_indices] -= dx[newton_indices]\n\n # Stop criteria\n if np.all(np.abs(dx) < precision):\n break\n\n # Else update parameters\n f = f_prime(rts)\n df = f_prime2(rts)\n f_negative = f < 0\n x_low[f_negative] = rts[f_negative]\n x_high[~f_negative] = rts[~f_negative]\n\n # Changing low and high of converged points\n converged = np.abs(dx) < precision\n x_low[converged] = rts[converged]\n x_high[converged] = rts[converged]\n\n print(np.sum(np.abs(dx) < precision))\n\n rot = rts\n m_list = np.arange(m + 1)\n m_list_ang = m_list * np.pi / 180\n c *= np.exp(1j * np.outer(m_list_ang, rot))\n corr = (np.real(c[0]) + 2 * np.sum(np.real(c[1:]), axis=0)) / 2\n\n return corr, rot", "def rotation(self, e1, e2, theta):\n e1_r = e1 * numpy.cos(2 * theta) - e2 * numpy.sin(2 * theta)\n e2_r = e1 * numpy.sin(2 * theta) + e2 * numpy.cos(2 * theta)\n return e1_r, e2_r", "def add_triangles(t1, t2):\n solutions = []\n for i in range(3):\n for j in range(3):\n # See if t1 angle 0 and t2 angle i can be merged\n if eq(t1.angles[i] + t2.angles[j], math.pi):\n # The two angles (t1[i] and t2[j]) fit together to form a straight\n # line. 
Now we just need to make sure that the sides that are\n # merging are the same length\n if eq(t1.sides[(i + 1) % 3], t2.sides[(j + 2) % 3]):\n # Calculate the dx and dy on the side of t1 that's being \"extended\"\n dx = t1.vertices[i][0] - t1.vertices[(i + 1) % 3][0]\n dy = t1.vertices[i][1] - t1.vertices[(i + 1) % 3][1]\n\n v3x = t1.vertices[i][0] + dx * t2.sides[(j + 1) % 3] / t1.sides[(i + 2) % 3]\n v3y = t1.vertices[i][1] + dy * t2.sides[(j + 1) % 3] / t1.sides[(i + 2) % 3]\n solutions.append(Triangle([t1.vertices[(i + 1) % 3],\n t1.vertices[(i + 2) % 3],\n (v3x, v3y)]))\n\n if eq(t1.sides[(i + 2) % 3], t2.sides[(j + 1) % 3]):\n # Calculate the dx and dy on the side of t1 that's being \"extended\"\n dx = t1.vertices[i][0] - t1.vertices[(i + 2) % 3][0]\n dy = t1.vertices[i][1] - t1.vertices[(i + 2) % 3][1]\n\n v3x = t1.vertices[i][0] + dx * t2.sides[(j + 2) % 3] / t1.sides[(i + 1) % 3]\n v3y = t1.vertices[i][1] + dy * t2.sides[(j + 2) % 3] / t1.sides[(i + 1) % 3]\n solutions.append(Triangle([t1.vertices[(i + 1) % 3],\n t1.vertices[(i + 2) % 3],\n (v3x, v3y)]))\n\n return solutions", "def get_angle(a: Keypoint, b: Keypoint, c: Keypoint) -> float:\n # get a vector with origin in (0,0) from points a and b by substracting Point a from Point b\n vector_a = keypoint_to_vector(a, b)\n vector_c = keypoint_to_vector(c, b)\n # https://de.wikipedia.org/wiki/Skalarprodukt => winkel phi = arccos(...)\n phi = np.arccos(np.dot(vector_a, vector_c) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_c)))\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return phi if angle_left_opening else -phi", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def addVectors(r1, r2):\n \"\"\" [0] = angle, [1] = lenght \"\"\"\n x = (math.sin(r1[0]) * r1[1]) + (math.sin(r2[0]) * r2[1])\n y = (math.cos(r1[0]) * r1[1]) + (math.cos(r2[0]) * r2[1])\n \n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n\n return (angle, length)", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 
* np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def get_best_quaternion(coordlist1, coordlist2):\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n\n N = np.matrix([[N11, N12, N13, N14],\n [N21, N22, N23, N24],\n [N31, N32, N33, N34],\n [N41, N42, N43, N44]])\n\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1, ).tolist()\n return quat, max(w)", "def orientation(sign1L, sign2L):\n p_p = 0\n m_m = 0\n p_m = 0\n m_p = 0\n for index in range(len(sign1L)):\n sign1 = sign1L[index]\n sign2 = sign2L[index]\n if sign1 in [\"+\", \"-\"] and sign2 in [\"+\", \"-\"]:\n if sign1 == sign2:\n if sign1 == \"+\":\n p_p += 1\n elif sign1 == \"-\":\n m_m += 1\n else:\n if sign1 == \"+\" and sign2 == \"-\":\n p_m += 1\n elif sign1 == \"-\" and sign2 == \"+\":\n m_p += 1\n same_strand = p_p + m_m\n opposite_strand = p_m + m_p\n convergent = p_m\n divergent = m_p\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent" ]
[ "0.61534816", "0.6057964", "0.6041564", "0.6022013", "0.5908641", "0.5882892", "0.5800431", "0.57707256", "0.5706791", "0.56642485", "0.56599987", "0.5649005", "0.5628651", "0.5623927", "0.56015086", "0.5588204", "0.55626535", "0.5548194", "0.55238026", "0.5522772", "0.5512592", "0.5511358", "0.54814446", "0.54579234", "0.5439676", "0.54255545", "0.5425142", "0.5417542", "0.54150444", "0.5410281" ]
0.7362805
0
Retrieve pixel size from the header. We check attribute Pixel_size and also pixel size from ctf object, if it exists. If the two are different or if the pixel size is not set, return 1.0 and print a warning.
def get_pixel_size(img): p1 = img.get_attr_default("apix_x", -1.0) cc = img.get_attr_default("ctf", None) if cc == None: p2 = -1.0 else: p2 = round(cc.apix, 3) if p1 == -1.0 and p2 == -1.0: ERROR("Pixel size not set", "get_pixel_size", 0) return -1.0 elif p1 > -1.0 and p2 > -1.0: if abs(p1-p2) >= 0.001: ERROR("Conflict between pixel size in attribute and in ctf object", "get_pixel_size", 0) # pixel size is positive, so what follows omits -1 problem return max(p1, p2) else: return max(p1, p2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pixelsize(self):\n if hasattr(self, \"_pixelsize\"):\n return self._pixelsize\n\n try:\n return self.header[\"PixSize\"] # [arcsec]\n except KeyError:\n try:\n return abs(self.header[\"CDELT1\"]) * 3600 # [deg] -> [arcsec]\n except KeyError:\n return None", "def get_pixel_size(self):\n raise NotImplementedError", "def px_size(self):\n xp, yp = ct.c_float(), ct.c_float()\n\n self.lib.GetPixelSize(ct.pointer(xp), ct.pointer(yp))\n\n return (xp.value, yp.value)", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def get_image_size(self):", "def get_size(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n size = file_meta_plist['$objects'][1]['Size']\n return size\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['size']", "def size(img):\n\treturn img.size", "def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size", "def size(self):\n if self._size and not self._pil_image:\n return self._size\n else:\n return self.pil_image.size", "def pix_size(self):\n return self._pix_size", "def getSize(self):\n outSize = float2()\n _res = self.mAPIContext.SDGraphObjectFrame_getSize(self.mHandle, ctypes.byref(outSize))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return outSize", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def __header_size(self):\n return self.SIZE_LINEUPS + self.SIZE_PLAYERS_PER_LINEUP", "def byte_size(self) -> int:\n return pixel_formats[self._dtype][3] * self._components * self.width * self.height", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def size(self):\n return self.__image.size", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def getPixelSize(self):\n return (0.000013, 0.000013)", "def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def GetPixelSize(*args, **kwargs):\n return _gdi_.Font_GetPixelSize(*args, **kwargs)", "def size(self):\n if self._buffer is not None:\n length = SIZEOF_TAGHEADER\n if self._header.value_type == b'B':\n # TODO make sure this is right, need data that uses B to verify\n length += SIZEOF_UINT32 + (len(self._buffer))\n elif self._header.value_type in b'HZ':\n length += len(self._buffer)\n else:\n length += SIZEOF_TAG_TYPES[self._header.value_type]\n return length\n else:\n return 0", "def get_obj_size(self, name):\n\t\t# get handle\n\t\t# size of red blood 
cell\n\t\twidth = 60.35\n\t\treturn width", "def get_size(image):\n width, height = image.size\n\n return (width, height)", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def find_size(mod):\n left = right = top = bottom = 0\n\n for line in (n for n in mod if n[0] == \"fp_line\"):\n layer = [n for n in line if n[0] == \"layer\"][0]\n if layer[1] in (\"F.CrtYd\", \"B.CrtYd\"):\n start = [n for n in line if n[0] == \"start\"][0]\n end = [n for n in line if n[0] == \"end\"][0]\n for x, y in (start[1:], end[1:]):\n x = float(x)\n y = float(y)\n left = min(x, left)\n right = max(x, right)\n top = min(y, top)\n bottom = max(y, bottom)\n\n width = right - left\n height = bottom - top\n\n left -= width * border_ratio\n right += width * border_ratio\n top -= height * border_ratio\n bottom += height * border_ratio\n\n return left, right, top, bottom", "def get_size(self):", "def getWidth(self):\r\n width = 1\r\n if self.orientation == \"h\":\r\n width = self.size\r\n return width" ]
[ "0.72802687", "0.68242306", "0.68130434", "0.677174", "0.6668118", "0.6537701", "0.6535796", "0.6514242", "0.6482996", "0.64115053", "0.63932556", "0.6321486", "0.6316444", "0.6295255", "0.62676024", "0.62676024", "0.62564975", "0.6233684", "0.6221311", "0.6203486", "0.61531", "0.61531", "0.6130825", "0.6117649", "0.6106248", "0.60972565", "0.6089569", "0.6078482", "0.60716933", "0.6069912" ]
0.69274837
1
For the given grouping, convert ROOT files into DataFrames, merging groups together. Return a dictionary mapping file names to DataFrames.
def process_group(directory: str, files: dict, channel: str, year: str) -> dict: if len(files) == 0: raise Exception('empty file list for directory {}'.format(directory)) + 1 dataframes = {} for name, ifile in files.items(): # equivalent of hadding update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree') current_dfs = [] for update_df in update_dfs: update_df.fillna(-999, inplace=True) current_dfs.append(update_df) if len(current_dfs) > 0: dataframes[name] = pd.concat(current_dfs) dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]}) return dataframes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_processed_data(self, group_directory):\n processed_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'processed' in x.name][0]\n\n task_dirs = [x for x in processed_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_dirs = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n task_frame_files = list()\n if task_camera_dirs:\n task_frame_files = dict()\n for camera_dir in task_camera_dirs:\n task_frame_files[camera_dir.name] = [x for x in camera_dir.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for camera, frame_files in task_frame_files.items():\n for frame_file in frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n\n if frame not in files[task.name]:\n files[task.name][frame] = dict()\n\n files[task.name][frame][camera] = frame_file\n\n else:\n task_frame_files = [x for x in task.iterdir()\n if not x.is_dir()\n and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for frame_file in task_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n files[task.name][frame] = frame_file\n\n return files", "def _load_group_data(directory='', file_name='', df=True):\n\n # check if folder exists with experiment name\n if os.path.isdir(directory) is False:\n print 'making new directory to save data'\n os.mkdir(directory)\n \n # all files in directory\n files = os.listdir(directory)\n\n # if data file already exists\n if file_name in files:\n print 'group data found:', file_name\n\n # if data stored as pandas dataframe\n if df:\n # load data\n print directory+file_name\n group_data = pd.read_pickle(directory+file_name)\n print 'group data loaded'\n\n # if stored as dictionary\n else:\n # load data\n with open(directory+file_name, 'rb') as pkl_file:\n group_data= pickle.load(pkl_file)\n print 'group data loaded'\n\n # otherwise create data structure\n else:\n # data organized as {frequency}{syn distance}{number of synapses}{polarity}[trial]{data type}{tree}[section][segment][spikes]\n print 'no group data found'\n if df:\n group_data = pd.DataFrame()\n else:\n group_data= {}\n\n return group_data", "def get_clean_data(self, group_directory):\n clean_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'clean' in x.name][0]\n\n task_dirs = [x for x in clean_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_directory = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n camera_files = dict()\n for camera_directory in task_camera_directory:\n camera_id = camera_directory.name\n camera_frame_files = [x for x in camera_directory.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n for frame_file in camera_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if camera_directory.name not in camera_files:\n camera_files[camera_id] = dict()\n\n camera_files[camera_id][frame] = frame_file\n files[task.name] = camera_files\n\n return files", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def get_data(paths, df_names, categorical_feats, groupby=None, 
exclude_classes=[], rel_cols=None, sep=\",\"):\n\n def _load_data(path, sep=sep):\n \"\"\"small function to load according to the dataformat. (excel or csv)\"\"\"\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df\n\n # initialize list to store dataframes in\n dfs = []\n\n # Handle single path input\n if groupby and (len(paths) == 1 or isinstance(paths, str)):\n\n # load data depending on if the single path is given in a list of as string\n if isinstance(paths, str):\n data = _load_data(paths, sep)\n elif isinstance(paths, list):\n data = _load_data(*paths, sep)\n else:\n raise ValueError(\"It seems like the input was a single path. Please input path as string or inside a list.\")\n\n grouping = data.groupby(groupby)\n\n # split dataframe groups and create a list with all dataframes\n for name, grp in grouping:\n # skip class if it should be excluded\n if name in exclude_classes:\n continue\n\n df = grouping.get_group(name)[::]\n\n # consider all columns as relevant is no rel_cols given.\n if rel_cols is None:\n rel_cols = list(df)\n\n # consider the relevant columns\n dfs.append(df[rel_cols])\n\n # Handle multiple paths input\n elif len(paths) > 1:\n for path in paths:\n df = _load_data(path)\n dfs.append(df)\n\n return DataCollection(dfs, df_names, categorical_feats)", "def generate_group_summary_table(self, groups, group_names=None):\n output = {\n 'patient_patches': {},\n 'slide_patches': {},\n 'patient_slides': {},\n }\n groups['chunks'].sort(key=lambda chunk: chunk['id'])\n category_names = sorted([c.name for c in self.CategoryEnum])\n cum_header = 'Overall' if self.is_binary else 'Total'\n headers = category_names + [cum_header]\n num_headers = len(headers)\n group_patches = pd.DataFrame(columns=headers)\n group_slides = pd.DataFrame(columns=headers)\n group_patients = pd.DataFrame(columns=headers)\n for chunk in groups['chunks']:\n try:\n group_name = group_names[chunk['id']]\n except (TypeError, KeyError):\n group_name = f\"Group {chunk['id'] + 1}\"\n patch_paths = chunk['imgs']\n patches = {name: set() for name in category_names}\n slides = {name: set() for name in category_names}\n patients = {name: set() for name in category_names}\n all_patches = set()\n all_slides = set()\n all_patients = set()\n patient_patches = pd.DataFrame(columns=headers)\n slide_patches = pd.DataFrame(columns=headers)\n patient_slides = pd.DataFrame(columns=headers)\n for patch_path in patch_paths:\n patch_id = utils.create_patch_id(patch_path, self.patch_pattern)\n label = utils.get_label_by_patch_id(patch_id, self.patch_pattern,\n self.CategoryEnum, is_binary=self.is_binary).name\n slide_name = utils.get_slide_by_patch_id(patch_id, self.patch_pattern)\n patient_id = utils.get_patient_by_slide_id(slide_name,\n dataset_origin=self.dataset_origin)\n\n patches[label].add(patch_id)\n\n if slide_name not in slides[label]:\n if patient_id not in patient_slides.index:\n patient_slides.loc[patient_id] = [0] * num_headers\n patient_slides.at[patient_id, label] += 1\n if slide_name not in all_slides:\n patient_slides.at[patient_id, cum_header] += 1\n \n slides[label].add(slide_name)\n patients[label].add(patient_id)\n\n if patient_id not in patient_patches.index:\n patient_patches.loc[patient_id] = [0] * num_headers\n patient_patches.at[patient_id, label] += 1\n patient_patches.at[patient_id, cum_header] += 1\n\n if slide_name not in slide_patches.index:\n 
slide_patches.loc[slide_name] = [0] * num_headers\n slide_patches.at[slide_name, label] += 1\n slide_patches.at[slide_name, cum_header] += 1\n\n all_patches.add(patch_id)\n all_slides.add(slide_name)\n all_patients.add(patient_id)\n\n for label, s in patches.items():\n group_patches.at[group_name, label] = len(s)\n group_patches.at[group_name, cum_header] = len(all_patches)\n for label, s in slides.items():\n group_slides.at[group_name, label] = len(s)\n group_slides.at[group_name, cum_header] = len(all_slides)\n for label, s in patients.items():\n group_patients.at[group_name, label] = len(s)\n group_patients.at[group_name, cum_header] = len(all_patients)\n\n patient_patches.loc[\"Total\"] = patient_patches.sum().astype(int)\n slide_patches.loc[\"Total\"] = slide_patches.sum().astype(int)\n patient_slides.loc[\"Total\"] = patient_slides.sum().astype(int)\n output['patient_patches'][group_name] = patient_patches\n output['slide_patches'][group_name] = slide_patches\n output['patient_slides'][group_name] = patient_slides\n \n group_patches.loc['Total'] = group_patches.sum().astype(int)\n group_slides.loc['Total'] = group_slides.sum().astype(int)\n group_patients.loc['Total'] = group_patients.sum().astype(int)\n output['group_patches'] = group_patches\n output['group_slides'] = group_slides\n output['group_patients'] = group_patients\n return output", "def simple_loadings_df(self, group_labels_file, subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):\n # make sure file exists\n if not os.path.exists(group_labels_file):\n raise FileNotFoundError('The file {} has not been found.'.format(group_labels_file))\n\n # make sure this object has been .fit()\n self._update()\n\n groups = self._parse_groups_file(group_labels_file=group_labels_file)\n patids = self._get_subject_ids(subjid_pat=subjid_pat)\n\n loads = self._load_loadings()\n\n # build the raw loadings table\n df = build_raw_loadings_table(loads, patids)\n df = add_groups_to_loadings_table(df, groups)\n return df", "def toDataFrame(self, split=True):\n\n def cleanColumns(df):\n # Cleanup columns\n colnames = df.columns\n colnames=[c.replace('\\'','') for c in colnames]\n colnames=[c[1:] if c.startswith('/') else c for c in colnames]\n # If there is only one group, we remove the group key\n groupNames = self.groupNames\n if len(groupNames)==1:\n nChar = len(groupNames[0])\n colnames=[c[nChar+1:] for c in colnames] # +1 for the \"/\"\n df.columns = colnames\n\n fh = self['data']\n if split:\n # --- One dataframe per group. 
We skip group that have empty data\n dfs={}\n for group in fh.groups():\n try:\n df = group.as_dataframe(time_index=True)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = group.as_dataframe(time_index=False)\n if len(df)>0:\n dfs[group.name] = df\n if len(dfs)==1:\n dfs=dfs[group.name]\n return dfs\n else:\n # --- One dataframe with all data\n try:\n df = fh.as_dataframe(time_index=True)\n cleanColumns(df)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = fh.as_dataframe(time_index=False)\n return df", "def load_groups(files):\n groups = defaultdict(list)\n for f in files:\n d = np.load(f, allow_pickle=True)\n gkey = to_group_key(d['args'].item()._get_kwargs())\n groups[gkey].append((f, d))\n return groups", "def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group", "def get_data(self, df, latest_currency):\n file_paths = list(df[\"File\"])\n df = self.extract_df(file_paths[0])\n df = self.group_df(df)\n df = self.fill_league_currency(df, latest_currency)\n for file_path in file_paths[1:]:\n league = self.extract_df(file_path)\n league_grp = self.group_df(league)\n league_grp = self.fill_league_currency(league_grp, latest_currency)\n df = df.join(league_grp)\n df = df.reset_index(drop=True)\n return df", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def _generate_datasets(self):\n datasets = list()\n for fname in sorted(os.listdir(self.base_dir)):\n if not self._filename_re.match(fname):\n continue\n\n file_path = os.path.join(self.base_dir, fname)\n try:\n fh = self._open_hdf5(file_path)\n\n except (IOError, OSError) as e:\n warnings.warn('Cannot access {}; skipped'.format(file_path))\n print(e)\n continue\n\n for key in fh:\n if self._groupname_re.match(key.lstrip('/')):\n datasets.append(ObjectTableWrapper(fh, key, self._schema))\n continue\n\n warn_msg = 'incorrect group name \"{}\" in {}; skipped this group'\n warnings.warn(warn_msg.format(os.path.basename(file_path), key))\n\n return datasets", "def get_datasets(h5group, prefix=''):\n for key in h5group.keys():\n h5obj = h5group[key]\n path = 
'{}/{}'.format(prefix, key)\n attrs = {att:val for att, val in h5obj.attrs.items()}\n\n if isinstance(h5obj, h5py.Dataset): \n \n # get metadata\n units = attrs[\"units\"] if 'units' in attrs else None\n spec = attrs[\"datatype\"] if 'datatype' in attrs else None\n \n # special handling for the nested waveform dataset\n if \"waveform/values/cumulative_length\" in path:\n nwfs = h5obj.shape[0]\n \n # must fix datatype AFTER this initial iteration\n yield (path, \"waveform\", nwfs, None, units, spec) \n elif \"waveform\" in path:\n pass\n \n # handle normal 'array<1>{real}' datasets\n else:\n yield (path, key, h5obj.shape[0], h5obj.dtype, units, spec) \n \n # test for group (go down)\n elif isinstance(h5obj, h5py.Group): \n yield from get_datasets(h5obj, path)", "def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def collect_data(input_folder, ratio):\n # TODO implement ratio\n data = pd.DataFrame()\n\n folderpaths = [os.path.normpath((os.path.join(input_folder, x)))\n for x in os.listdir(input_folder) if not x.endswith('.gitkeep')]\n # for folder in folderpaths:\n for folder in folderpaths:\n filepaths = [os.path.normpath((os.path.join(folder, x)))\n for x in os.listdir(folder) if not x.endswith('.gitkeep')]\n for file in filepaths:\n df = pd.read_pickle(file)\n df = df[df['is_feas'] == 1]\n data = data.append(df[['frames', 'label']], ignore_index=True)\n\n return data.rename(columns={'frames': 'x', 'label': 'y'})", "def weighted_loadings_df(self, group_labels_file, subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):\n # make sure file exists\n if not os.path.exists(group_labels_file):\n raise FileNotFoundError('The file {} has not been found.'.format(group_labels_file))\n\n self._update()\n\n # let's first pick the simple version of the loadings\n df = self.simple_loadings_df(group_labels_file, subjid_pat=subjid_pat)\n blobs = get_largest_blobs(self._icc_imgs)\n\n masks = [apply_mask(ic_map, blob) for ic_map, blob in zip(self._icc_imgs, blobs)]\n\n blob_avgs = [mask.mean() for mask in masks]\n\n blob_signs = np.sign(blob_avgs)\n n_ics = len(blob_avgs)\n df[list(range(1, n_ics+1))] = df[list(range(1, n_ics+1))] * blob_signs\n return df", "def read_dfdict_data(datadir, subset=None):\n print('Reading datasets...')\n # Initialize dict to store all dataframes\n dfdict = {}\n\n # If subset of datasets are given, read only those\n if subset is not None:\n with open(subset, 'r') as f:\n datasetids = f.read().splitlines()\n else:\n datasetids = get_dataset_ids(datadir)\n\n # Read each dataset and convert to relative abundance\n for dataset in datasetids:\n print(dataset),\n ## Read dataset\n df, meta = read_dataset_files(dataset, datadir)\n df = raw2abun(df)\n\n ## Get case and control samples\n classes_list = get_classes(meta)\n if len(classes_list[0]) == 0 or len(classes_list[1]) == 0:\n raise ValueError('Something wrong with ' + dataset + ' metadata.')\n H_smpls, dis_smpls = get_samples(meta, classes_list)\n\n dfdict.update({dataset: {'df': df, 'meta': meta, 'dis_smpls': dis_smpls, 'H_smpls': H_smpls, 'classes': classes_list}})\n print('\\nReading datasets... 
Finished.')\n return dfdict", "def collect2dict(filenames, outdir):\n \n tbldict = {}\n for fn in filenames:\n try:\n path = max(glob.glob(outdir+fn+'*.pkl'), key=os.path.getctime)\n out = pd.read_pickle(path)\n tbldict[fn] = out\n except ValueError:\n print(fn + ' not found in ' + outdir)\n return tbldict", "def defineFileGroups(self, mergeableFiles):\n fileGroups = {}\n foundFiles = []\n\n for mergeableFile in mergeableFiles:\n if mergeableFile[\"file_lfn\"] not in foundFiles:\n foundFiles.append(mergeableFile[\"file_lfn\"])\n else:\n continue\n\n if mergeableFile[\"pnn\"] not in fileGroups:\n if self.mergeAcrossRuns:\n fileGroups[mergeableFile[\"pnn\"]] = []\n else:\n fileGroups[mergeableFile[\"pnn\"]] = {}\n\n if self.mergeAcrossRuns:\n fileGroups[mergeableFile[\"pnn\"]].append(mergeableFile)\n else:\n if mergeableFile[\"file_run\"] not in fileGroups[mergeableFile[\"pnn\"]]:\n fileGroups[mergeableFile[\"pnn\"]][mergeableFile[\"file_run\"]] = []\n fileGroups[mergeableFile[\"pnn\"]][mergeableFile[\"file_run\"]].append(mergeableFile)\n\n return fileGroups", "def create_data_frame(self):\n column_names = Annotations.create_columns(self.headers, self.annot_types)\n dtypes = Annotations.get_dtypes_for_group_annots(self.headers, self.annot_types)\n df = self.open_file(\n self.file_path,\n open_as=\"dataframe\",\n # Coerce values in group annotations\n converters=dtypes,\n # Header/column names\n names=self.headers,\n # Prevent pandas from reading first 2 lines in file\n # since they're passed in with param 'names'\n skiprows=2,\n )[0]\n self.file = Annotations.convert_header_to_multi_index(df, column_names)", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def load_groupfile(infile_path):\n name, ext = 
os.path.splitext(os.path.basename(infile_path))\n if ext == '.tsv':\n df = pd.read_table(infile_path, header=0)\n elif ext == '.csv':\n df = pd.read_csv(infile_path, header=0)\n else:\n raise ValueError(\"File type not supported: \" + ext)\n\n return df", "def convert_subtree(group: h5py.Group, refs: h5py.Group):\n d = {}\n for key in group:\n if key == \"#refs#\":\n continue\n value = group[key]\n if isinstance(value, h5py.Group):\n d[key] = convert_subtree(value, refs=refs)\n elif isinstance(value, h5py.Dataset):\n d[key] = convert_dataset(value, refs=refs)\n else:\n raise ValueError(f\"Can't convert {value} of type {type(value)}.\")\n return d", "def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)", "def load_data(self, directory, group):\n \n em_images = h5py.File(os.path.join(directory, \"image.h5\"), 'r')\n segmentations = h5py.File(os.path.join(directory, \"human_labels_split.h5\"), 'r')\n\n if group == 'train':\n return em_images['main'][:192], segmentations['main'][:192]\n elif group == 'dev':\n return em_images['main'][192:], segmentations['main'][192:]\n \"\"\"\n em_images = h5py.File(os.path.join(directory, \"voronoi_boundary.h5\"), 'r')\n segmentations = h5py.File(os.path.join(directory, \"voronoi_segmentation.h5\"), 'r')\n\n if group == 'train':\n return em_images['main'][:16], segmentations['main'][:16]\n elif group == 'dev':\n return em_images['main'][16:], segmentations['main'][16:]\n \"\"\"", "def process_data_group(folder:Path, type:str, light:bool = False) -> dict:\n\n if type == dm.Delivery:\n data_folder = folder / 'data'\n else:\n data_folder = folder\n\n # check for non-existent or empty folder\n if not data_folder.exists():\n raise FileNotFoundError\n try:\n next((data_folder).glob(\"**/*\"))\n except StopIteration:\n # folder is empty can't process it\n raise FileNotFoundError\n\n # Get file sizes, last modified dates, and names to count,\n # sum size, and hash the file data provided\n file_sizes, file_modified_dates, file_metamodified_dates, file_names = zip(\n *[\n (f.stat().st_size, f.stat().st_mtime, f.stat().st_ctime, f)\n for f in (data_folder).glob(\"**/*\")\n if f.is_file() and f.name != 'receipt.rst'\n ]\n )\n\n last_modified = datetime.fromtimestamp(\n max(max(file_modified_dates),\n max(file_metamodified_dates)))\n\n # Hash the files in the delivery\n if light:\n folder_hash = 'skipped'\n else:\n folder_hash = hash_files(file_names)\n\n dg = {\n 'name' : folder.name,\n 'type' : type.__name__,\n 'last_update' : datetime.now(),\n 'size' : sum(file_sizes),\n 'num_files' : len(file_sizes),\n 'group_hash' : 
folder_hash,\n 'group_last_modified' : last_modified,\n }\n\n return dg", "def load_all(self, root_dir, file_list=None, pattern=None):\n # each file name corresponds to another date. Also tools (A, B) and others.\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(WeldData.load_single(path) for path in input_paths)\n\n return all_df", "def _merge_records(group: List[ParseResult]):\n\n # Group the file list and parsers\n group_files = list(set(sum([tuple(x.group) for x in group], ())))\n group_parsers = '-'.join(sorted(set(sum([[x.parser] for x in group], []))))\n\n # Merge the metadata\n is_list = [isinstance(x.metadata, list) for x in group]\n if sum(is_list) > 1:\n raise NotImplementedError('We have not defined how to merge >1 list-type data')\n elif sum(is_list) == 1:\n list_data = group[is_list.index(True)].metadata\n if len(is_list) > 1:\n other_metadata = reduce(_merge_func,\n [x.metadata for x, t in zip(group, is_list) if not t])\n group_metadata = [_merge_func(x, other_metadata) for x in list_data]\n else:\n group_metadata = list_data\n else:\n group_metadata = reduce(_merge_func, [x.metadata for x in group])\n return ParseResult(group_files, group_parsers, group_metadata)" ]
[ "0.6541141", "0.60830426", "0.6062187", "0.59850407", "0.5894021", "0.5629577", "0.56196886", "0.5457541", "0.5436698", "0.5417699", "0.5383131", "0.53697276", "0.5362166", "0.5328199", "0.5324665", "0.5262586", "0.52602506", "0.5259905", "0.52521104", "0.5234652", "0.5222199", "0.5219996", "0.521683", "0.520833", "0.5199812", "0.51879185", "0.5141566", "0.51389223", "0.5132139", "0.5128927" ]
0.64353156
1
Guess an appropriate chunk layout for an array, given its shape and the size of each element in bytes. Will allocate chunks only as large as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of each axis, slightly favoring bigger values for the last index. Undocumented and subject to change without warning.
def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]: ndims = len(shape) # require chunks to have non-zero length for all dimensions chunks = np.maximum(np.array(shape, dtype="=f8"), 1) # Determine the optimal chunk size in bytes using a PyTables expression. # This is kept as a float. dset_size = np.prod(chunks) * typesize target_size = CHUNK_BASE * (2 ** np.log10(dset_size / (1024.0 * 1024))) if target_size > CHUNK_MAX: target_size = CHUNK_MAX elif target_size < CHUNK_MIN: target_size = CHUNK_MIN idx = 0 while True: # Repeatedly loop over the axes, dividing them by 2. Stop when: # 1a. We're smaller than the target chunk size, OR # 1b. We're within 50% of the target chunk size, AND # 2. The chunk is smaller than the maximum chunk size chunk_bytes = np.prod(chunks) * typesize if ( chunk_bytes < target_size or abs(chunk_bytes - target_size) / target_size < 0.5 ) and chunk_bytes < CHUNK_MAX: break if np.prod(chunks) == 1: break # Element size larger than CHUNK_MAX chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0) idx += 1 return tuple(int(x) for x in chunks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")", "def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:\n\n # N.B., expect shape already normalized\n\n # handle auto-chunking\n if chunks is None or chunks is True:\n return guess_chunks(shape, typesize)\n\n # handle no chunking\n if chunks is False:\n return shape\n\n # handle 1D convenience form\n if isinstance(chunks, numbers.Integral):\n chunks = tuple(int(chunks) for _ in shape)\n\n # handle bad dimensionality\n if len(chunks) > len(shape):\n raise ValueError(\"too many dimensions in chunks\")\n\n # handle underspecified chunks\n if len(chunks) < len(shape):\n # assume chunks across remaining dimensions\n chunks += shape[len(chunks) :]\n\n # handle None or -1 in chunks\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks))\n\n chunks = tuple(int(c) for c in chunks)\n return chunks", "def yield_chunks(arr, chunk_size):\r\n larr = len(arr)\r\n if larr < chunk_size:\r\n raise ValueError(\"The array length (%d) must be larger than the chunk size (%d)\" % (len(arr), chunk_size))\r\n\r\n cursor = 0\r\n while cursor < larr:\r\n next_cursor = min(cursor + chunk_size, larr)\r\n yield arr[cursor:next_cursor]\r\n cursor = next_cursor", "def test_chunk_size(self):\n for chunk_size, expected_n_chunks in [(1, 100), (3, 34), (200, 1), (None, 1)]:\n with self.subTest(chunk_size=chunk_size):\n iterable_of_args, iterable_len, chunk_size_, n_splits = apply_numpy_chunking(\n self.test_data_numpy, chunk_size=chunk_size, n_splits=1\n )\n\n # Materialize generator and test contents. 
The chunks should be of size chunk_size (expect for the last\n # chunk which can be smaller)\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n chunk_size = chunk_size or 100\n for chunk_idx, chunk in enumerate(iterable_of_args):\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[chunk_idx * chunk_size:\n (chunk_idx + 1) * chunk_size])\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size_, 1)\n self.assertIsNone(n_splits)", "def perform_chunking(self, data_size, chunk_size):\r\n\r\n chunks, i = [], 0\r\n while True:\r\n chunks.append((i * (chunk_size - self.overlap / 2), i * (chunk_size - self.overlap / 2) + chunk_size))\r\n i += 1\r\n if chunks[-1][1] > data_size:\r\n break\r\n\r\n n_count = len(chunks)\r\n chunks[-1] = tuple(x - (n_count * chunk_size - data_size - (n_count - 1) * self.overlap / 2) for x in chunks[-1])\r\n chunks = [(int(x), int(y)) for x, y in chunks]\r\n return chunks", "def get_chunks(size):\n chunk_start = 0\n chunk_size = 0x20000\n\n while chunk_start + chunk_size < size:\n yield (chunk_start, chunk_size)\n chunk_start += chunk_size\n if chunk_size < 0x100000:\n chunk_size += 0x20000\n\n if chunk_start < size:\n yield (chunk_start, size - chunk_start)", "def get_chunks(num_items, num_steps):\n chunk_sizes = np.zeros(num_steps, dtype=int)\n chunk_sizes[:] = num_items // num_steps\n chunk_sizes[:num_items % num_steps] += 1\n\n chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)\n chunk_offsets[0] = 0\n return chunk_sizes, chunk_offsets", "def calc_chunk_sizes(data_size, min_chunk_size=CHUNK_SIZE_MIN - 4,\n tgt_chunk_size=CHUNK_SIZE_TGT - 4,\n max_chunk_size=CHUNK_SIZE_MAX - 4):\n logger.debug('data size is %s' % data_size)\n logger.debug('calculating chunk sizes')\n if data_size < min_chunk_size:\n return [min_chunk_size]\n elif data_size <= tgt_chunk_size:\n return [data_size]\n # so now, we're only dealing with sizes > target size\n chunk_count = 1\n direction = 'up'\n changed_direction = False\n while 1:\n chunk_size = data_size * 1.0 / chunk_count\n margin = abs(tgt_chunk_size - chunk_size)\n if chunk_size == tgt_chunk_size:\n return [tgt_chunk_size,] * chunk_count\n elif changed_direction:\n if margin >= last_margin:\n chunk_count = last_chunk_count\n break\n elif chunk_size >= max_chunk_size:\n if direction == 'down':\n changed_direction = True\n direction = 'up'\n last_margin = margin\n last_chunk_count = chunk_count\n elif chunk_size <= min_chunk_size:\n if direction == 'up':\n changed_direction = True\n direction = 'down'\n last_margin = margin\n last_chunk_count = chunk_count\n else:\n last_margin = margin\n last_chunk_count = chunk_count\n if tgt_chunk_size < chunk_size < max_chunk_size and \\\n direction == 'down':\n # a little bigger than we'd like (need more pieces)\n direction = 'up'\n changed_direction = True\n elif min_chunk_size <= chunk_size < tgt_chunk_size and \\\n direction == 'up':\n # a bit too small (try fewer pieces)\n direction = 'down'\n changed_direction = True\n if direction == 'up':\n chunk_count += 1\n elif direction == 'down':\n chunk_count -= 1\n\n # Create the list of chunk sizes. 
Spread out the rounding difference among\n # the chunks so that they're close to the same size.\n chunk_size_raw = data_size * 1.0 / chunk_count\n logger.debug('unrounded chunk size is %s ' % chunk_size_raw)\n chunk_size = int(round(chunk_size_raw))\n logger.debug('rounded chunk size is %s' % chunk_size)\n total_round_diff = data_size - chunk_size * chunk_count\n msg = '%s rounded bytes to distribute among %s chunks'\n logger.debug(msg % (total_round_diff, chunk_count))\n # Round it to a full byte. If it's negative, round down because rounding\n # up will take away too many chunks.\n if total_round_diff < 0:\n total_round_diff = int(math.floor(total_round_diff))\n else:\n total_round_diff = int(math.ceil(total_round_diff))\n round_diff_sum = total_round_diff\n chunk_sizes = []\n for i in range(chunk_count):\n if round_diff_sum > 0:\n this_chunk_size = chunk_size + 1\n round_diff_sum -= 1\n elif round_diff_sum < 0:\n this_chunk_size = chunk_size - 1\n round_diff_sum += 1\n else:\n this_chunk_size = chunk_size\n chunk_sizes.append(this_chunk_size)\n assert data_size == sum(chunk_sizes), '%s, %s' (data_size,\n sum(chunk_sizes))\n return chunk_sizes", "def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size", "def iter_slices(shape, chunk_size):\n assert len(shape) == len(chunk_size)\n num_grid_chunks = [int(ceil(s / float(c))) for s, c in zip(shape, chunk_size)]\n for grid_index in numpy.ndindex(*num_grid_chunks):\n yield tuple(\n slice(min(d * c, stop), min((d + 1) * c, stop)) for d, c, stop in zip(grid_index, chunk_size, shape))", "def iterate_array_in_chunks(arr, size: int):\n for i in range(0, len(arr), size):\n yield arr[i:i+size]", "def dur_chunk_sizes(n, ary):\n ret = np.ones((ary,), dtype=np.int32) * (n // ary)\n ret[: n % ary] = n // ary + 1\n assert ret.sum() == n\n return ret", "def split_array(array: np.ndarray, parts: int):\n\n if parts == -1:\n parts = array.size\n shape = array.shape\n possible_chunk_sizes = []\n # Generate all possible chunk sizes for the given array shape\n for chunk_size in product(*[range(1, shape[i] + 1) for i in range(len(shape))]):\n # Check if the number of chunks generated by the current chunk size is equal to the desired number of parts\n if np.prod(\n [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]) == parts:\n possible_chunk_sizes.append(chunk_size)\n # Sort the possible chunk sizes in ascending order of the sum of the squares of their dimensions\n possible_chunk_sizes.sort(key=lambda x: np.sum(np.array(x) ** 2)) # type: ignore\n if not possible_chunk_sizes:\n logging.warning(\"Could not divide the domain in %d parts. 
Trying with parts=%d.\", parts, parts - 1)\n return split_array(array=array, parts=parts - 1)\n selected_chunk_size = possible_chunk_sizes[0]\n\n chunks = []\n # Get the number of chunks for the first possible chunk size\n num_chunks = [shape[i] // selected_chunk_size[i] + int(shape[i] % selected_chunk_size[i] != 0) for i in\n range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n # Iterate over the chunks and append the corresponding slice of the array to the chunks list\n for indx in product(*indexes):\n current_slice = tuple(\n slice(selected_chunk_size[i] * indx[i], min(selected_chunk_size[i] * (indx[i] + 1), shape[i])) for i in\n range(len(shape)))\n chunks.append(array[current_slice])\n return chunks", "def look_for_biggest_structure(game, chunk, imgs, hmap, nmax, type_):\n for n in range(nmax,0,-1):\n i = 0\n m = parameters.MAX_VILLAGE_WIDTH * n / parameters.MAX_VILLAGE_SIZE\n while i < parameters.VILLAGE_TRY:\n chunkpos = np.random.randint(0,parameters.S,2)\n cx,cy = chunkpos\n h = np.sum(hmap[cx:cx+m,cy:cy+m]) / (m*m)\n if h > parameters.VILLAGE_LEVEL:\n force_build_structure(game, imgs, chunk, chunkpos, n, type_)\n return n\n i += 1\n return 0", "def calculateChunkSize(size, record_count, splits):\n avg_record_size = size / record_count\n logging.info(\n \"Avg record size: %0.02f=%d/%d\" %\n (avg_record_size, size, record_count))\n chunk = floor(ceil(size / (splits * avg_record_size)) * avg_record_size)\n\n logging.info(\n \"Setting chunk to: %d=floor(ceil(%d/(%d*%0.02f))*%0.02d)\" %\n (chunk, size, splits, avg_record_size, avg_record_size))\n return chunk", "def reshape_as_blocks(data, block_size):\n data, block_size = _process_block_inputs(data, block_size)\n\n if np.any(np.mod(data.shape, block_size) != 0):\n raise ValueError(\n \"Each dimension of block_size must divide evenly \"\n \"into the corresponding dimension of data\"\n )\n\n nblocks = np.array(data.shape) // block_size\n new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)\n nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices\n block_idx = tuple(range(1, len(new_shape), 2)) # odd indices\n\n return data.reshape(new_shape).transpose(nblocks_idx + block_idx)", "def split_by_size(array: Iterable[T], size: int) -> Iterable[List[T]]:\n chunk = []\n for e in array:\n chunk.append(e)\n if len(chunk) == size:\n yield chunk\n chunk = []\n if len(chunk) > 0:\n yield chunk", "def split_chunk_iter(chunk, sizes, neighbors, rng=None):\n assert len(chunk) > len(sizes), f\"{len(chunk)} !> {len(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # start by drawing three random items\n splits = [[c] for c in rng.sample(list(chunk), len(sizes))]\n unused = set(chunk) - set(sum(splits, []))\n max_iters = max(sizes) * len(sizes) # worst case\n for j in range(max_iters):\n i = j % len(sizes)\n size = sizes[i]\n split = splits[i]\n if len(split) == size:\n continue\n # get all of the neighbors of the split\n candidates = set()\n for c in split:\n candidates |= neighbors[c]\n # filter to unused cubes\n candidates = candidates & unused\n if not candidates:\n return None\n # Pick a candidate at random and add it\n choice = rng.choice(list(candidates))\n split.append(choice)\n unused.remove(choice)\n return splits", "def ensure_chunk_size(da: xr.DataArray, **minchunks: int) -> xr.DataArray:\n if not uses_dask(da):\n return da\n\n all_chunks = dict(zip(da.dims, da.chunks))\n chunking = dict()\n for dim, minchunk in minchunks.items():\n chunks = all_chunks[dim]\n if minchunk 
== -1 and len(chunks) > 1:\n # Rechunk to single chunk only if it's not already one\n chunking[dim] = -1\n\n toosmall = np.array(chunks) < minchunk # Chunks that are too small\n if toosmall.sum() > 1:\n # Many chunks are too small, merge them by groups\n fac = np.ceil(minchunk / min(chunks)).astype(int)\n chunking[dim] = tuple(\n sum(chunks[i : i + fac]) for i in range(0, len(chunks), fac)\n )\n # Reset counter is case the last chunks are still too small\n chunks = chunking[dim]\n toosmall = np.array(chunks) < minchunk\n if toosmall.sum() == 1:\n # Only one, merge it with adjacent chunk\n ind = np.where(toosmall)[0][0]\n new_chunks = list(chunks)\n sml = new_chunks.pop(ind)\n new_chunks[max(ind - 1, 0)] += sml\n chunking[dim] = tuple(new_chunks)\n\n if chunking:\n return da.chunk(chunks=chunking)\n return da", "def __initialize_from_shape_and_chunk_size(self, shape: Tuple[int, ...], chunk_size: Tuple[int, ...]):\n collection_shape = tuple(\n shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape)))\n objects = np.empty(collection_shape, dtype=MultiDimensionalSlice)\n num_chunks = [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n\n for indx in product(*indexes):\n current_slice = tuple(\n slice(chunk_size[i] * indx[i], min(chunk_size[i] * (indx[i] + 1), shape[i])) for i in range(len(shape)))\n objects[indx] = MultiDimensionalSlice(indices=indx, slices=current_slice)\n self.__initialize_from_array(objects_array=objects)", "def split_into_subarrays_of_max_len(arr, max_len=44100):\n return np.split(arr, np.arange(max_len, len(arr), max_len))", "def chunks(array, size: int):\r\n for i in range(0, len(array), size):\r\n yield array[i:i + size]", "def get_blocks_shape(big_array, small_array):\n return tuple([int(b/s) for b, s in zip(big_array, small_array)])", "def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]", "def window_blocks(large_array, window_size):\n y_size = large_array.shape[0]/window_size\n blocks_array = large_array.reshape(y_size, window_size)\n return blocks_array", "def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)", "def _chunkify(arr, size):\n arrs = []\n for i in range(0, len(arr), size):\n chunk = bytearray(arr[i:i + size])\n arrs.append(chunk)\n return arrs", "def rand_aligned_slices(maxdim=5, maxshape=16):\n ndim = randrange(1, maxdim+1)\n minshape = 2\n n = randrange(100)\n if n >= 95:\n minshape = 0\n elif n >= 90:\n minshape = 1\n all_random = True if randrange(100) >= 80 else False\n lshape = [0]*ndim; rshape = [0]*ndim\n lslices = [0]*ndim; rslices = [0]*ndim\n\n for n in range(ndim):\n small = randrange(minshape, maxshape+1)\n big = randrange(minshape, maxshape+1)\n if big < small:\n big, small = 
small, big\n\n # Create a slice that fits the smaller value.\n if all_random:\n start = randrange(-small, small+1)\n stop = randrange(-small, small+1)\n step = (1,-1)[randrange(2)] * randrange(1, small+2)\n s_small = slice(start, stop, step)\n _, _, _, slicelen = slice_indices(s_small, small)\n else:\n slicelen = randrange(1, small+1) if small > 0 else 0\n s_small = randslice_from_slicelen(slicelen, small)\n\n # Create a slice of the same length for the bigger value.\n s_big = randslice_from_slicelen(slicelen, big)\n if randrange(2) == 0:\n rshape[n], lshape[n] = big, small\n rslices[n], lslices[n] = s_big, s_small\n else:\n rshape[n], lshape[n] = small, big\n rslices[n], lslices[n] = s_small, s_big\n\n return lshape, rshape, tuple(lslices), tuple(rslices)", "def _calculate_step_sizes(x_size, y_size, num_chunks):\n # First we try to split only along fast x axis\n xstep = max(1, int(x_size / num_chunks))\n\n # More chunks are needed only if xstep gives us fewer chunks than\n # requested.\n x_chunks = int(x_size / xstep)\n\n if x_chunks >= num_chunks:\n ystep = y_size\n else:\n # The x and y loops are nested, so the number of chunks\n # is multiplicative, not additive. Calculate the number\n # of y chunks we need to get at num_chunks.\n y_chunks = int(num_chunks / x_chunks) + 1\n ystep = max(1, int(y_size / y_chunks))\n\n return xstep, ystep", "def get_chunk_slices(ds_dim, chunk_size):\n chunks = list(range(0, ds_dim, chunk_size))\n if chunks[-1] < ds_dim:\n chunks.append(ds_dim)\n else:\n chunks[-1] = ds_dim\n\n chunks = list(zip(chunks[:-1], chunks[1:]))\n\n return chunks" ]
[ "0.644044", "0.629798", "0.61731964", "0.60461324", "0.598301", "0.5948597", "0.59229267", "0.5806321", "0.57971877", "0.5785244", "0.57549477", "0.56843406", "0.5641695", "0.5637497", "0.56076527", "0.5582129", "0.55698544", "0.556121", "0.55578834", "0.5544379", "0.5536151", "0.5526536", "0.55035055", "0.5464144", "0.5453769", "0.5418642", "0.5414757", "0.5408439", "0.53291434", "0.53081846" ]
0.7806975
0
Convenience function to normalize the `chunks` argument for an array with the given `shape`.
def normalize_chunks(chunks: Any, shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]: # N.B., expect shape already normalized # handle auto-chunking if chunks is None or chunks is True: return guess_chunks(shape, typesize) # handle no chunking if chunks is False: return shape # handle 1D convenience form if isinstance(chunks, numbers.Integral): chunks = tuple(int(chunks) for _ in shape) # handle bad dimensionality if len(chunks) > len(shape): raise ValueError("too many dimensions in chunks") # handle underspecified chunks if len(chunks) < len(shape): # assume chunks across remaining dimensions chunks += shape[len(chunks) :] # handle None or -1 in chunks if -1 in chunks or None in chunks: chunks = tuple(s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks)) chunks = tuple(int(c) for c in chunks) return chunks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _normalize_shape(shape):\n\n if isinstance(shape, (np.integer, int)):\n if shape < 1:\n raise ValueError(\"shape value must be greater than 0: %d\"\n % shape)\n shape = (shape,) # N is a shorthand for (N,)\n try:\n shape = tuple(shape)\n except TypeError:\n raise TypeError(\"shape must be an integer or sequence: %r\"\n % (shape,))\n\n # XXX Get from HDF5 library if possible.\n # HDF5 does not support ranks greater than 32\n if len(shape) > 32:\n raise ValueError(\n f\"shapes with rank > 32 are not supported: {shape!r}\")\n\n return tuple(SizeType(s) for s in shape)", "def normalize(shape):\n s = shape\n matrix = Shape.get_matrix(s.get_vector())\n norm_x = math.sqrt(sum(matrix[:, 0] ** 2))\n norm_y = math.sqrt(sum(matrix[:, 1] ** 2))\n for pt in s.pts:\n pt.x /= norm_x\n pt.y /= norm_y\n return s", "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def normalize_array(array):\n\n return array / np.sum(array, axis=1)[:, np.newaxis]", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def normalize(a, axis=None):\n a_sum = a.sum(axis)\n if axis and a.ndim > 1:\n a_sum[a_sum == 0] = 1\n shape = list(a.shape)\n shape[axis] = 1\n a_sum.shape = shape\n\n return a / a_sum", "def rechunk(array, chunk_size=None, chunk_overlap=None):\n\n # deal with chunk sizes\n ds = array.datashape.copy()\n if chunk_size is None:\n chunk_size = ds.chunk_size\n if isinstance(chunk_size, int):\n chunk_size = [chunk_size] * ds.ndim\n ds.chunk_size = chunk_size\n\n if chunk_overlap is None:\n chunk_overlap = ds.chunk_overlap\n if isinstance(chunk_overlap, int):\n chunk_overlap = [chunk_overlap] * ds.ndim\n ds.chunk_overlap = chunk_overlap\n\n if ds != array.datashape:\n array = array.redimension(ds.schema)\n return array", "def normalize_shape(shape: Union[int, Tuple[int, ...], None]) -> Tuple[int, ...]:\n\n if shape is None:\n raise TypeError(\"shape is None\")\n\n # handle 1D convenience form\n if isinstance(shape, numbers.Integral):\n shape = (int(shape),)\n\n # normalize\n shape = cast(Tuple[int, ...], shape)\n shape = tuple(int(s) for s in shape)\n return shape", "def normalize_chunks(\n chunks: Mapping[str, Union[int, Tuple[int, ...]]],\n dim_sizes: Mapping[str, int],\n) -> Dict[str, int]:\n if not chunks.keys() <= dim_sizes.keys():\n raise ValueError(\n 'all dimensions used in chunks must also have an indicated size: '\n f'chunks={chunks} vs dim_sizes={dim_sizes}')\n result = {}\n for dim, size in dim_sizes.items():\n if dim not in chunks:\n result[dim] = size\n elif isinstance(chunks[dim], tuple):\n unique_chunks = set(chunks[dim])\n if len(unique_chunks) != 1:\n raise ValueError(\n f'chunks for dimension {dim} are not constant: {unique_chunks}',\n )\n result[dim], = unique_chunks\n elif chunks[dim] == -1:\n result[dim] = size\n else:\n result[dim] = chunks[dim]\n return result", "def roi_normalise(roi, shape):\n\n def fill_if_none(x, val_if_none):\n return val_if_none if x is None else x\n\n def norm_slice(s, n):\n start = fill_if_none(s.start, 0)\n stop = fill_if_none(s.stop, n)\n start, stop = [x if x >= 0 else n+x for x in (start, stop)]\n return slice(start, stop, 
s.step)\n\n if not isinstance(shape, collections.abc.Sequence):\n shape = (shape,)\n\n if isinstance(roi, slice):\n return norm_slice(roi, shape[0])\n\n return tuple([norm_slice(s, n) for s, n in zip(roi, shape)])", "def normalised(a: np.ndarray, order: int = None, axis: int = -1) -> np.ndarray:\n norm = np.atleast_1d(np.linalg.norm(a, order, axis))\n return a / np.expand_dims(norm, axis)", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def array_rebin(data, shape):\n\n # Ensure dimensions are consistent\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n\n # Get pairs of (shape, bin factor) for each dimension\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n\n # Rebin the array\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data", "def _fit_array_to_image(base_shape, array: np.ndarray) -> np.ndarray:\n shape = list(array.shape)\n for i, el in enumerate(base_shape):\n if el == 1 and el != shape[i]:\n shape.insert(i, 1)\n elif el != shape[i]:\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n if len(shape) != len(base_shape):\n raise ValueError(f\"Wrong array shape {shape} for {base_shape}\")\n return np.reshape(array, shape)", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def normalize(arr):\n\n total = sum(arr)\n\n return list(map(lambda x: x / total, arr))", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def _normalize_sequence(arr, rank):\n if hasattr(arr, \"__iter__\") and not isinstance(arr, str):\n if isinstance(arr, cupy.ndarray):\n arr = cupy.asnumpy(arr)\n normalized = list(arr)\n if len(normalized) != rank:\n err = \"sequence argument must have length equal to arr rank\"\n raise RuntimeError(err)\n else:\n normalized = [arr] * rank\n return normalized", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(nparray, order=2, axis=0):\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)", "def __preprocess(data, sample_size: int = 200000):\n mean = data[:sample_size].mean(axis=0)\n data -= mean\n stdev = data[:sample_size].std(axis=0)\n data /= stdev\n return data", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def BatchNormalization(inputs, data_format):\n return tf.layers.BatchNormalization(axis=1 if data_format == 
'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY,\n epsilon=_BATCH_NORM_EPSILON,\n scale=True)(inputs)", "def __normalize_after_fft(arr):\n\n n1, n2 = arr.shape[0], arr.shape[1]\n for i in range(n1):\n for j in range(n2):\n arr[i, j] *= n1 * n2\n\n return arr", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def normalize(arr, stats=False):\n arr = np.array(arr)\n mean = arr.mean()\n std = arr.std()\n normed = (arr - mean) / std\n if not stats:\n return normed\n return normed, mean, std", "def normalize(arr, eps):\n\n norm = cuda.reduce('T x', 'T out',\n 'x * x', 'a + b', 'out = sqrt(a)', 0,\n 'norm_sn')(arr)\n cuda.elementwise('T norm, T eps',\n 'T x',\n 'x /= (norm + eps)',\n 'div_sn')(norm, eps, arr)\n return norm", "def normalize(v):\n\tdim = v.shape \n\tfor i in range(0, dim[0]-1):\n\t\tv[i,:,:] = (v[i,:,:].T/np.sum(v[i,:,:],1)).T\n\n\treturn v", "def reshape(x, shape):\n return Reshape(shape)(x)" ]
[ "0.61362606", "0.60389656", "0.58118457", "0.5734301", "0.5696832", "0.5670947", "0.5670829", "0.5670006", "0.56443065", "0.5637183", "0.56079644", "0.55675006", "0.5559034", "0.55089456", "0.5506106", "0.5494193", "0.54827696", "0.5472908", "0.54428035", "0.54304826", "0.54268885", "0.53581226", "0.535042", "0.52944744", "0.5291196", "0.5289186", "0.5285938", "0.52779835", "0.5254062", "0.5238026" ]
0.7882431
0
Determine whether `item` specifies a complete slice of an array with the given `shape`. Used to optimize __setitem__ operations on the Chunk class.
def is_total_slice(item, shape: Tuple[int]) -> bool: # N.B., assume shape is normalized if item == Ellipsis: return True if item == slice(None): return True if isinstance(item, slice): item = (item,) if isinstance(item, tuple): return all( ( isinstance(it, slice) and ((it == slice(None)) or ((it.stop - it.start == sh) and (it.step in [1, None]))) ) for it, sh in zip(item, shape) ) else: raise TypeError("expected slice or tuple of slices, found %r" % item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True", "def roi_is_full(roi, shape):\n def slice_full(s, n):\n return s.start in (0, None) and s.stop in (n, None)\n\n if isinstance(roi, slice):\n roi = (roi,)\n shape = (shape,)\n\n return all(slice_full(s, n) for s, n in zip(roi, shape))", "def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False", "def __getitem__(self, item):\n # type (Any) -> Any\n # Workaround for Arrow bug that segfaults on empty slice.\n # This is fixed in Arrow master, will be released in 0.10\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n step = item.step if item.step is not None else 1\n # Arrow can't handle slices with steps other than 1\n # https://issues.apache.org/jira/browse/ARROW-2714\n if step != 1:\n arr = np.asarray(self)[item]\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if pa.types.is_integer(self.dtype.arrow_dtype) or pa.types.is_floating(\n self.dtype.arrow_dtype\n ):\n mask = pd.isna(arr)\n else:\n mask = None\n return type(self)(pa.array(arr, type=self.dtype.arrow_dtype, mask=mask))\n if stop - start == 0:\n return type(self)(pa.array([], type=self.data.type))\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean arrays are valid indices.\"\n )\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n value = self.data[item]\n if isinstance(value, pa.ChunkedArray):\n return type(self)(value)\n else:\n return value.as_py()", "def is_satisfied(self, item: Product) -> bool:\n return item.size == self.size", "def __getitem__(self, item: SliceLike):\n\n if item == Ellipsis:\n return JaggedArray(data=self.data[...], shape=self.shape[...])\n elif isinstance(item, slice):\n # slow but works\n return self.__class__.from_aoa(self.to_aoa()[item])\n else:\n return self.data[slice(*self._cumsum[item : item + 2])].reshape(\n self.shape[:, item]\n )", "def __getitem__(self, item):\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n if stop - start == 0:\n return type(self)(xnd.xnd([], type=self.data.type))\n\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean \\\n arrays are valid indices.\"\n )\n\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n else:\n\n return self.data[item]\n\n value = self.data[item]\n return type(self)(value)", "def __contains__(self, item: 'BoundingBox2D') -> bool:\n 
top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n return top_left_inside and bottom_right_inside", "def _is_dim_removed_by_splitting(cls, graph: NNCFGraph, node: NNCFNode) -> Optional[int]:\n split_axis = None\n if isinstance(node.layer_attributes, GetItemLayerAttributes):\n input_edge = graph.get_input_edges(node)[0]\n input_shape = input_edge.tensor_shape\n parent_node = input_edge.from_node\n child_nodes = graph.get_next_nodes(parent_node)\n child_attributes = [cnode.layer_attributes for cnode in child_nodes]\n all_getitem = all(isinstance(ca, GetItemLayerAttributes) for ca in child_attributes)\n assert all_getitem, \"currently supported only case with all __getitem__ on branches\"\n all_int_keys = all(isinstance(ca.key, int) for ca in child_attributes)\n # currently supported only case __getitem__ with single int, no slices\n if not all_int_keys:\n return None\n all_keys = set(ca.key for ca in child_attributes)\n split_dim = input_shape[0]\n if all_keys == set(range(split_dim)):\n split_axis = 0\n return split_axis", "def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset + itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n imin = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[\n j] <= 0)\n imax = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[\n j] > 0)\n return 0 <= offset + imin and offset + imax + itemsize <= memlen", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset+itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n\n imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] <= 0)\n imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] > 0)\n\n return 0 <= offset+imin and offset+imax+itemsize <= memlen", "def IsItemVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def is_slice(self) -> bool:\n return self._is_slice", "def can_grow(self, item):\n raise NotImplementedError('Child class must implement can_grow')", "def check_and_image_shape(item: ValueType, shape: List) -> List:\n if len(item.shape) > 0:\n item = str(item[0])\n if item.endswith(('.jpg', '.jpeg', '.png')):\n import cv2\n im = cv2.imread(item)\n if im is not None:\n return list(im.shape)\n return shape", "def has_shape(node):\n allowed_shapes = (\n pm.nt.Mesh,\n pm.nt.NurbsCurve,\n pm.nt.NurbsSurface\n )\n\n has_it = False\n\n children = node.getChildren()\n while 
len(children) and not has_it:\n child = children.pop(0)\n if isinstance(child, allowed_shapes):\n has_it = True\n break\n children += child.getChildren()\n\n return has_it", "def IsVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def __getitem__(self, item: slice | tuple):\n if isinstance(item, slice):\n start, stop = item.start, item.stop\n if start is None:\n start = 0\n if stop is None:\n stop = maxsize\n if start > stop:\n raise IndexError(\"make sure start <= stop\")\n return self.query(Orthotope([Interval(start, stop)]))\n elif isinstance(item, tuple):\n pass\n else:\n raise TypeError(f\"unrecognized index {item}\")", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def owns_shape(self, pm_shape):\n all_shapes = [part.pm_visible_shape for part in self.parts]\n if pm_shape in all_shapes:\n return True\n return False", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)", "def is_shape_dynamic(shape: trt.Dims) -> bool:\n return any([is_dimension_dynamic(dim) for dim in shape])", "def __getitem__(\n self, item: Union[int, slice, torch.BoolTensor]\n ) -> \"DensePoseChartPredictorOutput\":\n if isinstance(item, int):\n return DensePoseChartPredictorOutput(\n coarse_segm=self.coarse_segm[item].unsqueeze(0),\n fine_segm=self.fine_segm[item].unsqueeze(0),\n u=self.u[item].unsqueeze(0),\n v=self.v[item].unsqueeze(0),\n )\n else:\n return DensePoseChartPredictorOutput(\n coarse_segm=self.coarse_segm[item],\n fine_segm=self.fine_segm[item],\n u=self.u[item],\n v=self.v[item],\n )", "def _apply_item(self, item: Item) -> bool:\n return False", "def _is_extended_slice(s):\n\n return s.step is not None and s.step != 1", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def arrayContains(arr, item):\n\tcontains = True\n\ttry:\n\t\tarr.index(item)\n\texcept ValueError:\n\t\tcontains = False\n\treturn contains", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 
'image' in item)" ]
[ "0.5930391", "0.5894886", "0.5701367", "0.56653273", "0.5539224", "0.54814374", "0.54136163", "0.54108244", "0.5387186", "0.5382678", "0.5344155", "0.5341929", "0.53009504", "0.5299111", "0.52924687", "0.5279965", "0.52784073", "0.52331346", "0.5224626", "0.51978904", "0.51833797", "0.5111612", "0.5106401", "0.5096898", "0.5058394", "0.5024388", "0.50093496", "0.5006892", "0.49975714", "0.49951485" ]
0.780188
0
Test if all the elements of an array are equivalent to a value. If `value` is None, then this function does not do any comparison and returns False.
def all_equal(value: Any, array: Any): if value is None: return False if not value: # if `value` is falsey, then just 1 truthy value in `array` # is sufficient to return False. We assume here that np.any is # optimized to return on the first truthy value in `array`. try: return not np.any(array) except (TypeError, ValueError): # pragma: no cover pass if np.issubdtype(array.dtype, np.object_): # we have to flatten the result of np.equal to handle outputs like # [np.array([True,True]), True, True] return all(flatten(np.equal(value, array, dtype=array.dtype))) else: # Numpy errors if you call np.isnan on custom dtypes, so ensure # we are working with floats before calling isnan if np.issubdtype(array.dtype, np.floating) and np.isnan(value): return np.all(np.isnan(array)) else: # using == raises warnings from numpy deprecated pattern, but # using np.equal() raises type errors for structured dtypes... return np.all(value == array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_equal(array):\n if not array:\n raise ValueError(\"Array is empty\")\n\n first_item = array[0]\n\n if any(item != first_item for item in array):\n return False\n\n return True", "def has_equal_values_vec(x):\n return jnp.all(x == x[0])", "def check_array(self, array: ArrayData, value: List[int]):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n assert len(value) <= len(array), \"Array to compare against must contain a smaller or equal amount of elements.\"\n expected = self.array(value).name\n actual = \"la a2, \" + self._lookup_array(array)\n self._compare_int_array(array.name, actual, expected, value, exit_code = 2)", "def containsValue(self, value):\n for val in values():\n if val == value or val == value:\n return True\n return False", "def value_checker(self, puzzle: List[int], value: int) -> bool:\n if len(puzzle) == 0:\n return False\n\n if len(puzzle) == 1:\n if puzzle[0] is value:\n return True\n else:\n return False\n\n mid = len(puzzle) // 2\n left = self.value_checker(puzzle[:mid], value)\n right = self.value_checker(puzzle[mid:], value)\n\n return left or right", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False", "def edge_case(values):\r\n for val in values:\r\n if val is True:\r\n return False\r\n return True", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def check_all_iterable_values_equal(iterable):\n return all(second_value_onwards == iterable[0] for second_value_onwards in iterable[1:])", "def _has_at_least_one_value(self, i, values):\n for a in values:\n j = self.attributes.index(a)\n v = values[a]\n if self[i][j] == v:\n return True\n return False", "def check_solved(self, values):\n if values == None: #Forward_checking determines that values state is invalid -> set false, check if false here.\n return False\n\n for box in values.keys():\n if len(values[box]) != 1:\n return False\n return True", "def array_equal_to(obj):\n return ArrayIsEqual(obj)", "def check_array(self, v, t):\n raise NotImplementedError('check_array')", "def _has_values(self, i, values):\n for a in values:\n j = self.attributes.index(a)\n v = values[a]\n if self[i][j] != v:\n return False\n return True", "def are_equal(value1, value2):\n if value1 == None or value2 == None:\n return True\n if value1 == None or value2 == None:\n return False\n return value1 == value2", "def __call__(self, value: np.ndarray) -> bool:\n for k, bound in enumerate(self.bounds):\n if bound is not None:\n if np.any((value > bound) if k else (value < bound)):\n return False\n return True", "def any_values(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield any(v)", "def _all_na_or_values(series, values):\n series_excl_na = series[series.notna()]\n if not len(series_excl_na):\n out = True\n elif series_excl_na.isin(values).all():\n out = True\n else:\n out = False\n return out", "def array_equal_nan(a1, a2):\n try:\n a1, a2 = np.asarray(a1), np.asarray(a2)\n except 
Exception:\n return False\n if a1.shape != a2.shape:\n return False\n # Handling NaN values\n a1nan, a2nan = np.isnan(a1), np.isnan(a2)\n # NaN's occur at different locations\n if not (a1nan == a2nan).all():\n return False\n # Shapes of a1, a2 and masks are guaranteed to be consistent by this point\n return bool(np.asarray(a1[~a1nan] == a2[~a1nan]).all())", "def array_equivalent(left, right, strict_nan=False):\n\n left, right = np.asarray(left), np.asarray(right)\n\n # shape compat\n if left.shape != right.shape:\n return False\n\n # Object arrays can contain None, NaN and NaT.\n # string dtypes must be come to this path for NumPy 1.7.1 compat\n if is_string_dtype(left) or is_string_dtype(right):\n\n if not strict_nan:\n # isnull considers NaN and None to be equivalent.\n return lib.array_equivalent_object(\n _ensure_object(left.ravel()), _ensure_object(right.ravel()))\n\n for left_value, right_value in zip(left, right):\n if left_value is NaT and right_value is not NaT:\n return False\n\n elif isinstance(left_value, float) and np.isnan(left_value):\n if (not isinstance(right_value, float) or\n not np.isnan(right_value)):\n return False\n else:\n if left_value != right_value:\n return False\n return True\n\n # NaNs can occur in float and complex arrays.\n if is_float_dtype(left) or is_complex_dtype(left):\n return ((left == right) | (np.isnan(left) & np.isnan(right))).all()\n\n # numpy will will not allow this type of datetimelike vs integer comparison\n elif is_datetimelike_v_numeric(left, right):\n return False\n\n # M8/m8\n elif needs_i8_conversion(left) and needs_i8_conversion(right):\n if not is_dtype_equal(left.dtype, right.dtype):\n return False\n\n left = left.view('i8')\n right = right.view('i8')\n\n # NaNs cannot occur otherwise.\n try:\n return np.array_equal(left, right)\n except AttributeError:\n # see gh-13388\n #\n # NumPy v1.7.1 has a bug in its array_equal\n # function that prevents it from correctly\n # comparing two arrays with complex dtypes.\n # This bug is corrected in v1.8.0, so remove\n # this try-except block as soon as we stop\n # supporting NumPy versions < 1.8.0\n if not is_dtype_equal(left.dtype, right.dtype):\n return False\n\n left = left.tolist()\n right = right.tolist()\n\n return left == right", "def all_equal(sequence):\n return all(x == sequence[0] for x in sequence)", "def is_integer(value: Union[float, np.ndarray]) -> bool:\n if type(value) == np.ndarray:\n for entry in value:\n result = Comparator.is_integer(entry)\n if not result:\n return False\n return True\n else:\n value = abs(value)\n value -= int(value)\n if value > 0.5:\n return Comparator.is_close_to_zero(1 - value)\n return Comparator.is_close_to_zero(value)", "def __is_valid_value(self, target_row, target_col, value):\n if value == 0:\n return True # 0's are always a valid value since they are a placeholder (signify empty position)\n\n # Check row and column:\n for i in range(9):\n if self.final_values[i][target_col] == value and i != target_row: # Check column\n return False # Value appears on the same column twice\n if self.final_values[target_row][i] == value and i != target_col: # Check row\n return False # Value appears on the same row twice\n\n # Find start of 3x3 block:\n block_row = target_row - (target_row % 3)\n block_col = target_col - (target_col % 3)\n\n # Check each element in the 3x3 block:\n for row in range(3):\n for col in range(3):\n if value == self.final_values[block_row + row][block_col + col] and block_row + row != target_row and block_col + col != target_col:\n 
return False # Value appears in the same block twice\n\n return True # Value does not appear in the same row, col or block", "def _validate_value_type(value: Any, expected: Sequence[Type]) -> bool:\n\n for entry in expected:\n if get_origin(entry) is None:\n if type(value) == entry: # pylint: disable=unidiomatic-typecheck\n return True\n continue\n if _validate_value_type(value, get_args(entry)):\n return True\n return False", "def _confirm_constant(a):\n a = np.asanyarray(a)\n return np.isclose(a, 1.0).all(axis=0).any()", "def arrNotInArrList(arr, arrList):\n a = np.array(arr)\n for item in arrList:\n item = np.array(item)\n if np.array_equiv(item, a):\n return False\n return True", "def all_values(*values):\n print(\"here\")\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield all(v)", "def is_null(value: Any) -> bool:\n return not value", "def is_array(val):\n return (\n isinstance(val, tuple) or \\\n isinstance(val, dict) or \\\n isinstance(val, list)\n )" ]
[ "0.6699362", "0.6486526", "0.6385027", "0.6280364", "0.61277896", "0.612132", "0.6043027", "0.60365415", "0.5891498", "0.5891476", "0.5875021", "0.5753058", "0.5749066", "0.57388574", "0.5702565", "0.56945324", "0.5661426", "0.56569785", "0.56512713", "0.56494707", "0.56261206", "0.5598292", "0.5581735", "0.55642754", "0.5562079", "0.55550855", "0.5546242", "0.55428535", "0.55401695", "0.5523063" ]
0.7799601
0
Convenience function to coerce `buf` to ndarray-like array or bytes. First check if `buf` can be zero-copy converted to a contiguous array. If not, `buf` will be copied to a newly allocated `bytes` object.
def ensure_contiguous_ndarray_or_bytes(buf) -> Union[NDArrayLike, bytes]: try: return ensure_contiguous_ndarray_like(buf) except TypeError: # An error is raised if `buf` couldn't be zero-copy converted return ensure_bytes(buf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buffer_to_bytes(buf):\n if not isinstance(buf, bytes):\n buf = bytes(buf)\n return buf", "def test_array_as_buffer(parser):\n doc = parser.parse(b'''{\n \"d\": [1.2, 2.3, 3.4],\n \"i\": [-1, 2, -3, 4],\n \"u\": [1, 2, 3, 4, 5],\n \"x\": [1, 2, 3, \"not valid\"]\n }''')\n\n memoryview(doc['d'].as_buffer(of_type='d'))\n memoryview(doc['i'].as_buffer(of_type='i'))\n memoryview(doc['u'].as_buffer(of_type='u'))\n\n # Not a valid `of_type`.\n with pytest.raises(ValueError):\n doc['i'].as_buffer(of_type='x')\n\n # Not a valid homogeneous array.\n with pytest.raises(TypeError):\n doc['x'].as_buffer(of_type='u')\n\n # Signed elements should error on cast.\n with pytest.raises(ValueError):\n doc['i'].as_buffer(of_type='u')", "def _to_bytes_or_str_array(result, output_dtype_like=None):\n ret = numpy.asarray(result.tolist())\n dtype = getattr(output_dtype_like, 'dtype', None)\n if dtype is not None:\n return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False)\n return ret", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def to_bytearray(x):\n if isinstance(x, bytearray):\n return x\n else:\n return bytearray(x)", "def tobytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_byte * buf_len).from_buffer(buf)\n return buffer", "def toubytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte * buf_len).from_buffer(buf)\n return buffer", "def to_bytes_io(b):\n return b.buffer if sys.version_info.major == 3 else b", "def frombuffer(buffer, **kwargs):\n\n return call_origin(numpy.frombuffer, buffer, **kwargs)", "def decode_buffer(buf):\n return buf.getvalue().decode('utf-8')", "def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data", "def _read_to_buffer(cls, buf, stream):\n # We could read it in one step, but instead we'll read it in chunks to avoid big temporaries.\n # (See below.)\n # buf[:] = stream.read( len(buf) )\n\n # Read data from the stream in chunks\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n buf[chunk_start:chunk_stop] = stream.read( next_chunk_bytes )\n remaining_bytes -= next_chunk_bytes", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def get_buf(self, data_type = \"void\"):\n if self.buf is not None:\n return ffi.cast(data_type + \"*\", self.buf)\n else:\n raise RuntimeError(\"Buffer not created.\")", "def _as_numpy(y):\n if y is None:\n return None\n elif isinstance(y, np.ndarray):\n return np.copy(y)\n elif hasattr(y, 'as_matrix'):\n return y.as_matrix()\n elif hasattr(y, 'tolist'):\n return y.tolist()\n elif is_iterable(y):\n return np.asarray([i for i in y]) # might accidentally force object type in 3\n raise TypeError('cannot convert type %s to numpy ndarray' % type(y))", "def _deserialize_byte_array(shape, 
ba, offset):\n ar = ndarray(shape=shape, buffer=ba, offset=offset, dtype=\"float64\",\n order='C')\n return ar.copy()", "def test_array_as_buffer_ndim(parser):\n doc = parser.parse(b'''[[\n [1.0, 2.0],\n [3.0, 4.0]\n ]]''')\n view = memoryview(doc.as_buffer(of_type='d'))\n assert len(view) == 32", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChain._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def make_buffer():\n return BytesIO()", "def test_asarraylike_array():\n arr = np.array([1, 2, 3, 4])\n result = util.asarraylike(arr)\n\n assert result is arr", "def load_frombuffer(buf):\n if not isinstance(buf, string_types + tuple([bytes])):\n raise TypeError('buf required to be a string or bytes')\n out_size = mx_uint()\n out_name_size = mx_uint()\n handles = ctypes.POINTER(NDArrayHandle)()\n names = ctypes.POINTER(ctypes.c_char_p)()\n check_call(_LIB.MXNDArrayLoadFromBuffer(buf,\n mx_uint(len(buf)),\n ctypes.byref(out_size),\n ctypes.byref(handles),\n ctypes.byref(out_name_size),\n ctypes.byref(names)))\n if out_name_size.value == 0:\n return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]\n else:\n assert out_name_size.value == out_size.value\n return dict(\n (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))\n for i in range(out_size.value))", "def as_bytes(array_or_image,mimetype='image/png'):\n buf = StringIO()\n fmt = mimetype2format(mimetype)\n im = as_pil(array_or_image).save(buf,fmt)\n return buf.getvalue()", "def test_safe_array_cast(self):\n msg = '^Copying array of size \\(5, 5\\) to convert it in the ' \\\n 'right format$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X.astype(int))\n\n msg = '^Copying array of size \\(3, 5\\) to create a ' \\\n 'C-contiguous version of it$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X[::2])\n\n np.testing.assert_array_equal(self.X,\n PoissonRegression._safe_array(self.X))", "def array(a: any,\n dtype: any = None,\n order: {'C', 'F', 'A', 'K'} = 'K',\n *,\n alignment: int = 16,\n copy: bool = True,\n **kwargs) -> np.ndarray:\n\n # Store reference to the original array\n _a = a\n\n # Get array\n a = np.asarray(_a, dtype=dtype, order=order)\n\n # Check if a new copy is created\n _new = a is not _a\n\n # Get dtype, size and alignment\n dtype = a.dtype\n shape = a.shape\n size = np.prod(shape)\n order = 'C' if a.flags.c_contiguous else 'F'\n alignment = int(alignment)\n\n # Check alignment is compatible\n if alignment % dtype.itemsize:\n raise ValueError(\n f\"{dtype} is not compatible with 'alignment={alignment}'\")\n\n # If new, check alignment and eventually return if already aligned\n if (_new or not copy) and isaligned(a, alignment=alignment):\n return a\n\n # Get max_shift\n max_shift = alignment // dtype.itemsize\n\n # If _new, resize\n if _new:\n # Resize memory\n a.resize(size + max_shift)\n\n # Reference to buffer\n buffer = a\n\n # Return to the orginal size\n a = a[:size]\n\n # Otherwise, get new buffer\n else:\n buffer = np.empty((size + max_shift,), dtype=dtype, order=order)\n\n # Get right shift\n shift = (alignment - (buffer.ctypes.data % alignment)) // dtype.itemsize\n assert (shift <= max_shift)\n\n # Re-align if needed\n buffer = buffer[shift:size + shift]\n\n # Reshape\n buffer = np.reshape(buffer, shape, order=order)\n\n # Check alignment\n assert (isaligned(buffer, 
alignment=alignment))\n\n # Copy if a was provided\n np.copyto(buffer, np.reshape(a, shape, order=order))\n\n # Return buffer\n return buffer", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaCertificate._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def convert_array(blob):\n out = io.BytesIO(blob)\n out.seek(0)\n\n return np.load(out)", "def asanyarray(a, dtype=None, order='C'):\n\n if not use_origin_backend(a):\n # if it is already dpnp.ndarray then same object should be returned\n if isinstance(a, dpnp.ndarray):\n return a\n\n if order != 'C':\n checker_throw_value_error(\"asanyarray\", \"order\", order, 'C')\n\n return array(a, dtype=dtype, order=order)\n\n return call_origin(numpy.asanyarray, a, dtype, order)", "def as_bytearray(self):\n\n if self.index < 7:\n return self.buf + bytearray([self.byte])\n else:\n return self.buf", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChainDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def test_write_bufferprotocol(ctx):\n data = array('f', [1, 2, 3, 4])\n buff = ctx.buffer(data=data)\n assert buff.read() == data.tobytes()" ]
[ "0.6886174", "0.6035865", "0.5953554", "0.55916303", "0.5561849", "0.54923177", "0.5430941", "0.53742135", "0.51953775", "0.5164276", "0.51263016", "0.5078041", "0.507584", "0.5072293", "0.50700045", "0.5063474", "0.50496", "0.5039605", "0.5037979", "0.50145006", "0.49787346", "0.49525878", "0.49493375", "0.49403405", "0.49342653", "0.49341303", "0.4930725", "0.49249688", "0.4923769", "0.49161765" ]
0.8558664
0
Read and preprocess an image with data augmentation (random transform).
def read_for_training(p, augmentation=False): img = imread(TRAIN + p, mode='RGB') msk = img if mode == 'background': data = {'image': img} elif mode == 'instance' or mode == 'code': msk = imread(TRAIN_MASK + p.replace('.jpg', '.png')) data = {'image': img, 'mask': msk} if augmentation: data_aug = strong_aug()(**data) img = data_aug['image'] if 'mask' in data_aug: msk = data_aug['mask'] if mode == 'instance' or mode == 'code': img[~msk.astype(np.bool)] = 0 img, msk = size_normalization(img, msk) if mode == 'code': img = encode(img, msk) return img, msk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_", "def data_augmentation(image, aug):\n if (aug == \"random_crop\") and (random.randint(0,1)):\n image = random_crop(image) \n if (aug == \"random_rotation\") and (random.randint(0,1)): \n image = random_rotation(image) \n if (aug == \"random_flip\") and (random.randint(0,1)): \n image = random_flip(image)\n if (aug == \"affine_transformation\") and (random.randint(0,1)): \n image = affine_transformation(image)\n if (aug == \"random_gaussian_noise\") and (random.randint(0,1)): \n image = random_gaussian_noise(image)\n if (aug == \"random_erasing\") and (random.randint(0,1)): \n image = random_erasing(image) \n return image", "def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)", "def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)", "def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label", "def caffe_preprocess(img):\n out = numpy.copy(img)\n out -= IMAGE_256_MEAN[None, None, :]\n out = out[:, :, [2, 1, 0]] # swap channel from RGB to BGR\n return out", "def preprocess(img):\n # standard mean and std for the model\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n # resize\n img = img.resize(size = (224, 224))\n # transforms to numpy\n img = np.array(img, dtype = np.float64)\n # Mean and Std\n img = (img - mean)/std\n # transpose [channels first]\n img = img.transpose((2, 0, 1))\n # conver to Tensor\n img = torch.from_numpy(img)\n return img", "def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)", "def load_and_prep_image(img, img_shape=224):\n\n # Decode the read file into a tensor & ensure 3 colour channels \n # (our model is trained on images with 3 colour channels and sometimes images have 4 colour channels)\n img = tf.image.decode_image(img, channels=3)\n\n # Resize the image (to the same size our model was trained on)\n img = tf.image.resize(img, size = [img_shape, img_shape])\n\n # Rescale the image (get all values between 0 and 1)\n img = img/255.\n return img", "def _load_preprocess_image(self, image_file):\n\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def chainercv_preprocess(image):\n image = skio.imread(image)\n image = image.transpose(2, 0, 1)\n return [image]", "def preprocess_example_input(input_config):\n\n input_path = input_config[\"input_path\"]\n input_shape = input_config[\"input_shape\"]\n one_img = imread(input_path)\n if \"normalize_cfg\" in input_config.keys():\n 
normalize_cfg = input_config[\"normalize_cfg\"]\n mean = np.array(normalize_cfg[\"mean\"], dtype=np.float32)\n std = np.array(normalize_cfg[\"std\"], dtype=np.float32)\n one_img = imnormalize(one_img, mean, std)\n one_img = imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(True)\n (_, C, H, W) = input_shape\n one_meta = {\n \"img_shape\": (H, W, C),\n \"ori_shape\": (H, W, C),\n \"pad_shape\": (H, W, C),\n \"filename\": \"<demo>.png\",\n \"scale_factor\": 1.0,\n \"flip\": False,\n }\n\n return one_img, one_meta", "def _load_preprocess_image(self, image_file):\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image", "def preprocess_image(image, label, is_training):\n if is_training:\n # Randomly scale the image and label.\n image, label = preprocessing.random_rescale_image_and_label(\n image, label, _MIN_SCALE, _MAX_SCALE)\n\n # Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.\n image, label = preprocessing.random_crop_or_pad_image_and_label(\n image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)\n\n # Randomly flip the image and label horizontally.\n image, label = preprocessing.random_flip_left_right_image_and_label(\n image, label)\n\n image.set_shape([_HEIGHT, _WIDTH, 3])\n label.set_shape([_HEIGHT, _WIDTH, 1])\n print(\"seg11111111111\",image,label)\n image = preprocessing.mean_image_subtraction(image)\n\n return image, label", "def data_tf(img):\n img = crop.process(img)\n img = random_contrast.process(img)\n img = random_brightness.process(img)\n img = random_color.process(img)\n img = random_sharpness.process(img)\n if img.size[1] >= 32:\n img = compress.process(img)\n img = adjust_resolution.process(img)\n img = blur.process(img)\n img = exposure.process(img)\n # img = rotate.process(img)\n img = salt.process(img)\n img = inverse_color(img)\n img = stretch.process(img)\n return img", "def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")", "def preprocess(self, img):\n return img - np.mean(img)", "def image_preprocessing(img):\n\n # Removing parasite data (sky, trees and front of the car)\n return img[60:-20, :, :]", "def preprocess_image(image, is_training):\n if is_training:\n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_image_with_crop_or_pad(\n image, _IMAGE_SIZE + 8, _IMAGE_SIZE + 8)\n\n # Randomly crop a [_HEIGHT, _WIDTH] section of the image.\n image = tf.random_crop(image, [_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n return image", "def load_and_preprocess_image(path, max_dim=512):\n f = tf.io.read_file(path)\n img = 
tf.io.decode_image(f)\n img = resize_min(img, max_dim)\n img = tf.expand_dims(img, axis=0)\n img = vgg_preprocess_input(img)\n return img", "def load_image(path, preprocess=True):\n x = image.load_img(path, target_size=(H, W))\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n return x", "def preprocess(image):\n return image - MEAN_PIXEL", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def preprocess(img_name, quality=None):\n img = plt.imread(img_name)\n Y, X, C = img.shape\n img = img[:Y-Y%2, :X-X%2, :3]\n if quality is not None:\n img = jpeg_compress(img, quality)\n img = img_to_tensor(img).cuda().type(torch.float)\n return img", "def preprocess(file_path, model_preprocess_function):\n img = image.load_img(file_path, target_size=(224, 224))\n x = image.img_to_array(img)\n # x = np.expand_dims(x, axis=0)\n x = model_preprocess_function(x)\n return x", "def preprocess_image(image, training):\n ### YOUR CODE HERE\n\n if training:\n \n # Resize the image to add four extra pixels on each side.\n # image = tf.image.resize_image_with_crop_or_pad(image, 32 + 8, 32 + 8)\n image = np.pad(image, ((4,4),(4,4),(0,0)), 'constant')\n \n # Randomly crop a [32, 32] section of the image.\n # image = tf.random_crop(image, [32, 32, 3])\n _x_axis = np.random.randint(9)\n _y_axis = np.random.randint(9)\n image = image[_x_axis:_x_axis+32, _y_axis:_y_axis+32, :]\n \n # Randomly flip the image horizontally.\n # image = tf.image.random_flip_left_right(image)\n if np.random.randint(2) == 1:\n image = np.flip(image, axis=0)\n\n # Subtract off the mean and divide by the standard deviation of the pixels.\n # image = tf.image.per_image_standardization(image)\n mean = np.mean(image)\n adjusted_stddev = max(np.std(image), 1.0/np.sqrt(3072))\n image = (image - mean) / adjusted_stddev\n ### END CODE HERE\n\n return image", "def preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3, dct_method=\"INTEGER_ACCURATE\")\n return scale(image)", "def preprocess_image(image, training):\r\n if training:\r\n ### YOUR CODE HERE\r\n hpad = np.zeros((32,4,3))\r\n image = np.hstack((image,hpad))\r\n image = np.hstack((hpad,image))\r\n\r\n vpad = np.zeros((4,40, 3))\r\n image = np.vstack((image, vpad))\r\n image = np.vstack((vpad, image))\r\n\r\n #print(np.shape(image))\r\n # Resize the image to add four extra pixels on each side.\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly crop a [32, 32] section of the image.\r\n # HINT: randomly generate the upper left point of the image\r\n rx = np.random.randint(8)\r\n ry = np.random.randint(8)\r\n crp_img = image[rx:rx+32,ry:ry+32,:]\r\n #print(np.shape(crp_img))\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly flip the image horizontally.\r\n # for i in range(crp_img.shape[0]):\r\n # crp_img[i] = np.fliplr(crp_img[i])\r\n rf = np.random.randint(2)\r\n if(rf == 0):\r\n crp_img = np.fliplr(crp_img)\r\n #print(np.shape(crp_img))\r\n image = crp_img\r\n\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Subtract off the mean and divide by the standard deviation of the pixels.\r\n cmean = []\r\n cstd = []\r\n for i in range(np.shape(image)[2]):\r\n arr = image[:,:,i]\r\n cmean = np.mean(arr)\r\n cstd = (np.std(arr))\r\n lfn = lambda x : (x-cmean)/cstd\r\n image[:,:,i] = lfn(arr)\r\n #print(np.shape(image))\r\n\r\n ### YOUR CODE HERE\r\n\r\n return image", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] 
= self.transformer.preprocess('data', im)", "def preprocess(img):\n if img.ndim != 3:\n raise TypeError('bad ndim of img')\n if img.dtype != np.uint8:\n raise TypeError('bad dtype of img')\n img = cv2.resize(img, (224, 224))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n img *= (2.0/255) # normalize to: 0.0~2.0\n img -= 1.0 # subtract mean to make it: -1.0~1.0\n img = np.expand_dims(img, axis=0)\n return img" ]
[ "0.6944267", "0.6885695", "0.6801366", "0.6746409", "0.6730426", "0.6700084", "0.6674317", "0.6642256", "0.6604827", "0.65799797", "0.65796673", "0.6575271", "0.6572347", "0.6542871", "0.6535678", "0.6521868", "0.64719284", "0.64391285", "0.6430673", "0.6425564", "0.6418553", "0.63944936", "0.6359762", "0.63452995", "0.6345004", "0.63389987", "0.6315763", "0.6315252", "0.6310417", "0.6308325" ]
0.70998
0
Amount by which to increase x so that it is evenly divisible by 64
def padlen_64(x: int): return (64 - (x % 64)) % 64
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modReduce(self, x):\n\n assert 0 <= x < pow(self.mod, 2), 'out of range.'\n q = (x * self.u) >> (2 * self.M_bit)\n r = x - q * self.mod\n while r >= self.mod:\n r -= self.mod\n return r", "def taille(x):\n n = 0\n \n while (2**n) -1 < x :\n n+=1\n \n return(n)", "def foo_4(x):\n\tresult=1\n\tfor i in range(1, x+1):\n\t\tresult=result * i\n\treturn result", "def bulk_modulus():\n\n return 10000.0", "def mod_5(x):\r\n return x%5", "def pseudo(x,N) :\n\treturn (x**2+1)%N", "def mod_5(x):\n return x % 5", "def mod_5(x):\n return x % 5", "def enlarge(n):\n\n return n* 100", "def enlarge(n):\n return n*100", "def enlarge(n):\r\n return n * 100", "def num (x):\n\n if not x:\n return None\n seed = 1\n scale = Fraction(1,1)\n lone = None\n num = Fraction(0,1)\n while not x == seed:\n\n if not lone and le(_abs(sub(x,seed)),pos):\n lone = True\n if le(seed,x):\n seed = seed*2+1\n num += scale\n lone = lone or le(x,seed)\n else:\n seed = seed*2\n num -= scale\n lone = lone or le(seed,x)\n if lone:\n scale *= Fraction(1,2)\n return num", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum", "def five():\r\n \r\n n = 20\r\n divisible = False\r\n \r\n while divisible == False:\r\n n += 20\r\n divisible = True\r\n for i in range(20, 0, -1):\r\n if n % i != 0:\r\n divisible = False\r\n break\r\n return n", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def question_29(x: int) -> int:\n # Base case below:\n if x == 0:\n return 1\n # Recursive function below:\n else:\n result = 1\n while x > 0:\n for i in range(x):\n result *= question_29(i)\n x -= 1\n return result * 2 * 1", "def question_30(x: int) -> int:\n # Base case below:\n if x == 0:\n return 1\n # Recursive function below:\n else:\n result = 1\n while x > 0:\n for i in range(x):\n result *= 2 ** question_30(i)\n x -= 1\n return result", "def factorial(x):\r\n res = 1\r\n for i in range (1, x+1)\r\n res *= i\r\n return res", "def multiply_by_4(x):\n\treturn int(x) * 4", "def perfect_hash(num):\n return ((num+OFFSET)*(SIZE/PERIOD)) % (SIZE+1) + 1", "def _next_power_of_2(x):\n return 1 if x == 0 else 2**(x - 1).bit_length()", "def factorial(x):\r\n output = 1\r\n for factor in range(2,x+1):\r\n output = output * factor\r\n return output", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def my_func(x, y):\n result = 0\n pow_res = 1\n while y:\n pow_res= pow_res*x\n y +=1\n\n result = 1 / pow_res\n\n\n return result", "def r1(x, n, max_size=32):\n return (x << n) % (2 << (max_size - 1)) + (x >> (max_size - n))", "def modulus_raknare(steps):\n i = 0\n\n def next_step():\n nonlocal i\n i = (i + 1) % steps\n return i\n return next_step", "def nextpow2(x):\n log2_n = math.ceil(math.log2(x))\n n = 2 ** log2_n\n return n", "def prime_counting_function_inv(y):\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x", "def one():\r\n \r\n i = 1\r\n sum = 0\r\n while i < 1000:\r\n if i % 3 == 0 or i % 5 == 0:\r\n sum = sum + i\r\n i = i + 1\r\n else:\r\n i = i + 1\r\n return sum" ]
[ "0.64682055", "0.6353813", "0.63322943", "0.6308356", "0.6298304", "0.62941134", "0.61767954", "0.61767954", "0.61669123", "0.61343306", "0.61135375", "0.60509366", "0.59431607", "0.5926209", "0.5924908", "0.5908529", "0.5902806", "0.5896249", "0.58734715", "0.5857224", "0.5839226", "0.58385986", "0.5828709", "0.58237207", "0.5820636", "0.5814938", "0.58137983", "0.5798882", "0.57957083", "0.5790856" ]
0.6562799
0
Turns this user-readable string into an Alternative (no escaping)
def from_str(cls, encstr: str) -> 'Alternative':
    encstr = re.sub(r'\s+', '', encstr)
    return cls(*re.split('([' + string.punctuation + '])', encstr, maxsplit=1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def decode(cls, encstr: str) -> Tuple['Alternative', str]:\n cond = None\n end_off = 0\n\n # Swallow field up to conditiona\n while end_off < len(encstr):\n if encstr[end_off] in string.punctuation:\n cond = encstr[end_off]\n break\n end_off += 1\n if cond is None:\n raise ValueError('{} does not contain any operator'\n .format(encstr))\n field = encstr[:end_off]\n end_off += 1\n\n value = ''\n while end_off < len(encstr):\n if encstr[end_off] == '|':\n # We swallow this\n end_off += 1\n break\n if encstr[end_off] == '&':\n break\n if encstr[end_off] == '\\\\':\n end_off += 1\n value += encstr[end_off]\n end_off += 1\n\n return cls(field, cond, value), encstr[end_off:]", "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info", "def _hidden_in_unicode(self, txt):", "def random_alternative(self, fmt_string):\n # Find alternatives\n try:\n alts = self[fmt_string]\n except KeyError:\n # There are no alternatives for this string\n return fmt_string\n return random.choice(alts)", "def unicodise_safe(string, encoding = None):\n\n\treturn unicodise(deunicodise(string, encoding), encoding).replace(u'\\ufffd', '?')", "def escapeDecode(s: unicode) -> unicode:\n ...", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def autoconvert(s):\n try:\n return eval(s)\n except:\n return s", "def unstringify(cls, s: str, unescape_pipe: bool = True)->str:\n if s.startswith(cls.LANGUAGE_QUALIFIED_STRING_SIGIL):\n language: str\n s, language = s.rsplit(\"@\", 1)\n if unescape_pipe:\n s = s.replace('\\\\|', '|')\n return ast.literal_eval(s)", "def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output", "def polite_string(a_string):\n if is_py3() and hasattr(a_string, 'decode'):\n try:\n return a_string.decode('utf-8')\n except UnicodeDecodeError:\n return a_string\n\n return a_string", "def normalize_repr(item_repr):\n return DEFAULT_REPR_RE.sub('', item_repr)", "def rl_unescape_prompt(prompt: str) -> str:\n if rl_type == RlType.GNU:\n escape_start = \"\\x01\"\n escape_end = \"\\x02\"\n prompt = prompt.replace(escape_start, \"\").replace(escape_end, \"\")\n\n return prompt", "def parse(s):\n return s", "def _sanitize_string_for_python(self, s):\n s = repr(s)\n\n if s.startswith('u'):\n s = s[1:]\n\n return s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def __normalize_string(self, string):\n\n if 
self._dia & self._DIA_PRE93:\n string = string.replace(u\"Â\", u\"Î\")\n string = string.replace(u\"ROMÎNĂ\", u\"ROMÂNĂ\")\n elif self._dia & self._DIA_POST93:\n string = string.replace(u\"Î\", u\"Â\")\n string = string.replace(u\"Â \", u\"Î\")\n\n if self._dia & self._DIA_CEDILLA:\n string = string.replace(u\"Ș\", u\"Ş\")\n string = string.replace(u\"Ț\", u\"Ţ\")\n elif self._dia & self._DIA_COMMA:\n string = string.replace(u\"Ş\", u\"Ș\")\n string = string.replace(u\"Ţ\", u\"Ț\")\n\n if self._dia & self._DIA_NONE:\n string = string.replace(u\"Î\", u\"I\")\n string = string.replace(u\"Â\", u\"A\")\n string = string.replace(u\"Ă\", u\"A\")\n string = string.replace(u\"Ș\", u\"S\")\n string = string.replace(u\"Ț\", u\"T\")\n\n return string", "def normalize(self, text: str) -> str:", "def raw(string):\n string = string or \"\"\n return string.replace(\"{\", \"{{\").replace(\"|\", \"||\")", "def process_string(string: str) -> str:\n\n return string if string else Presenter.DEFAULT", "def optionxform(self, optionstr):\r\n return optionstr", "def optionxform(self, optionstr):\r\n return optionstr", "def convert_text(s):\n for d in config.repl: # loaded from config.py\n if \"flags\" in d:\n s = re.sub(d[\"ptrn\"], d[\"repl\"], s, flags=d[\"flags\"])\n else:\n s = re.sub(d[\"ptrn\"], d[\"repl\"], s)\n return s", "def haiku_string_parser():\n pass", "def fix_output(text: str) -> str:\n\n text = text.replace(\" n't\", \"n't\")\n return text", "def polishString(s): \n return re.sub(\"[/\\\\\\?\\|<>:\\\"\\*]\",\"_\",s).strip()", "def from_dual(self):\n return \"\"", "def beautify(self, string):\n\n\t\tif not string:\n\t\t\treturn string\n\n\t\t# string may differ because of escaped characters\n\t\tstring, phrases = self.parse(string)\n\n\t\tif not phrases:\n\t\t\treturn string\n\n\t\tif not self.positional and not self.always:\n\t\t\traise errors.ArgumentError(\"Found phrases, but no styles \"\n\t\t\t\t\t\t\t\t\t \"were supplied!\")\n\n\t\treturn self.stringify(string, phrases)" ]
[ "0.5630695", "0.5217001", "0.5198397", "0.5186945", "0.5184185", "0.51392734", "0.51312214", "0.50683737", "0.50631917", "0.5061903", "0.5048459", "0.50467837", "0.50248337", "0.49875286", "0.4931769", "0.49110556", "0.48986942", "0.48867592", "0.48742884", "0.4862726", "0.4840829", "0.48408157", "0.48179346", "0.48179346", "0.4807754", "0.47804108", "0.4769348", "0.47580546", "0.47570357", "0.47521055" ]
0.5572365
1
Pull a Restriction from encoded string, return remainder
def decode(cls, encstr: str) -> Tuple['Restriction', str]:
    alts = []
    while len(encstr) != 0:
        if encstr.startswith('&'):
            encstr = encstr[1:]
            break
        alt, encstr = Alternative.decode(encstr)
        alts.append(alt)
    return cls(alts), encstr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_str(cls, encstr: str) -> 'Restriction':\n encstr = re.sub(r'\\s+', '', encstr)\n ret, remainder = cls.decode(encstr)\n if len(remainder) != 0:\n raise ValueError(\"Restriction had extrs characters at end: {}\"\n .format(remainder))\n return ret", "def parse_mask(string):\n return string.split(' = ')[1]", "def process_restriction(restriction):\n if not restriction:\n return ''\n else:\n res = restriction.lower()\n if res == 'eaff':\n return '-eAFF'\n elif res == 'ey2h':\n return '-eY2H'\n elif res == 'y2h':\n return '-rY2H'\n elif res == 'aff':\n return '-rAFF'\n else:\n raise network_generation.IncorrectRestrictionType(res)", "def decode(self, s):", "def decode(self, s):", "def extract_critic_input(self, data):\n return data[1]", "def decode_match(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n return MatchProxy(bin)", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def decode(cls, encstr: str) -> Tuple['Alternative', str]:\n cond = None\n end_off = 0\n\n # Swallow field up to conditiona\n while end_off < len(encstr):\n if encstr[end_off] in string.punctuation:\n cond = encstr[end_off]\n break\n end_off += 1\n if cond is None:\n raise ValueError('{} does not contain any operator'\n .format(encstr))\n field = encstr[:end_off]\n end_off += 1\n\n value = ''\n while end_off < len(encstr):\n if encstr[end_off] == '|':\n # We swallow this\n end_off += 1\n break\n if encstr[end_off] == '&':\n break\n if encstr[end_off] == '\\\\':\n end_off += 1\n value += encstr[end_off]\n end_off += 1\n\n return cls(field, cond, value), encstr[end_off:]", "def decode_string(self, value):\r\n return value", "def decode_extra_field(self, string):\n\n if isinstance(string, str):\n try:\n decode = int(string)\n except ValueError:\n return string\n return decode\n else:\n return string", "def minisat_decode(clause_str):\n factor = ClauseVariable.encoding_factor()\n int_value = int(clause_str)\n compliment = (int_value < 0)\n int_value = abs(int_value)\n position = (int_value % factor) -1\n vertex = math.ceil(int_value/factor)-1\n return ClauseVariable(compliment,vertex,position)", "def string_bits(myStr):\n\n other = myStr[::2] \n \n return other", "def match(code, x):\n return decode(code)(x)", "def _mb_substr(string, start, length):\n return string.decode(_ENCODING)[start: start + length]", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n if end != len(s):\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\n return obj", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n if end != len(s):\n raise ValueError(errmsg(\"Extra data\", s, end, len(s)))\n return obj", "def decode_result(found):\n ...", "def decode_network_string(msgtype, plen, buf):\n return buf[header.size:plen - 1]", "def auth_sub_string_from_body(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Token='):\n # Strip off Token= and return the token value string.\n return response_line[6:]\n return None", "def decode(self, s, _w=WHITESPACE.match):\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n end = _w(s, end).end()\n\n return obj", "def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == 
-1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end", "def decode(self, encoded):", "def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]", "def decode(self, byteString):\n decoded = ''\n portion_left = byteString\n while len(portion_left) > 0:\n substr_len = 1\n symbol = None\n while (symbol == None) and (substr_len <= len(portion_left)):\n symbol = self.decode_symbol(portion_left[:substr_len])\n substr_len += 1\n\n if symbol == None:\n print \"decode failed:\"\n print \"decoded: \" + decoded\n print \"left: \" + portion_left\n return None\n\n decoded += symbol\n #print \"decoded: _\" + symbol + \"_\"\n portion_left = portion_left[substr_len-1:]\n\n return decoded", "def get_strInstructions(json):\n\n strInstructions = introcs.find_str(json,'\"strInstructions\"')\n\n\n string = json[strInstructions+17:]\n\n\n result = first_inside_quotes(string)\n\n\n return result", "def parse_special_word(s):\n index1 = s.find(special_word_marker)\n if index1 != -1:\n index2 = s.find(special_word_marker, index1 + 1)\n if index2 != -1:\n sw = normalize(s[index1+len(special_word_marker) : index2])\n rest = normalize(s[index2+len(special_word_marker) :])\n return sw, rest\n return None, s", "def _DecodeAccidentalString(cls, sAccidental):\n sAcc = sAccidental.strip()\n # Strip non-accidental content\n index = 0\n for n in range(len(sAcc)):\n if (sAcc[n] not in cls.lilyFlat) and (sAcc[n] not in cls.lilySharp):\n index = n\n break\n sAcc = sAcc[:index]\n encFlat = cls.encodingAccidentals.get(cls.lilyFlat, None)\n encSharp = cls.encodingAccidentals.get(cls.lilySharp, None)\n if cls._reFlat.match(sAcc):\n return encFlat*len(sAcc)//2\n elif cls._reSharp.match(sAcc):\n return encSharp*len(sAcc)//2\n else:\n return 0", "def selector(string,key,lkey,lval):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip+lkey:ip+lkey+lval]\n print 'velue = ',value\n else:\n value = 'none'\n \n return value", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)" ]
[ "0.6682689", "0.5536616", "0.54848987", "0.5409332", "0.5409332", "0.5249636", "0.52139443", "0.51220363", "0.5092601", "0.5083486", "0.50243723", "0.49601397", "0.4911395", "0.48801553", "0.48710787", "0.48674506", "0.48674506", "0.48620152", "0.4848582", "0.4832647", "0.4799203", "0.4796334", "0.47760275", "0.4772739", "0.47624645", "0.4756798", "0.47210005", "0.4720052", "0.4716607", "0.47060084" ]
0.61234236
1
Returns a Restriction from an escaped string (ignoring whitespace)
def from_str(cls, encstr: str) -> 'Restriction':
    encstr = re.sub(r'\s+', '', encstr)
    ret, remainder = cls.decode(encstr)
    if len(remainder) != 0:
        raise ValueError("Restriction had extra characters at end: {}"
                         .format(remainder))
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_regex(literal, regex):\n if regex:\n return regex.lower().strip()\n else:\n return r'\\b%s\\b'%literal.lower().strip()", "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def lisp_string(python_string):\n return '\"%s\"' % python_string.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def typeify(s):\n try:\n return literal_eval(s)\n except:\n return s", "def munge_condition_str(s):\n return s.replace(' and ', ' and\\n').replace(' or ', ' or\\n')", "def restricted_string_type(\n name: str,\n regex: Union[str, Pattern],\n docstring: Optional[str] = None,\n) -> type:\n if isinstance(regex, str):\n regex = re.compile(regex)\n expression = \"matching \" + regex.pattern\n\n extra_attrs = {\n \"_regex\": regex,\n \"_expression\": expression,\n \"_type\": str,\n }\n\n def check_value(cls, v):\n if not cls._regex.match(v):\n raise ValueError(f\"{v} does not match regular expression {cls._regex.pattern}\")\n\n return create_type(\n name=name,\n base_type=str,\n check_value=check_value,\n register_key=(expression, str),\n docstring=docstring,\n extra_attrs=extra_attrs,\n )", "def _get_legal(token):\n valid = re.split(r'[^]a-zA-Z0-0![,. {}@#$%^&*-_+=;:<>?/~\\'\\\\`]', token)\n return ''.join(valid).strip()", "def process_restriction(restriction):\n if not restriction:\n return ''\n else:\n res = restriction.lower()\n if res == 'eaff':\n return '-eAFF'\n elif res == 'ey2h':\n return '-eY2H'\n elif res == 'y2h':\n return '-rY2H'\n elif res == 'aff':\n return '-rAFF'\n else:\n raise network_generation.IncorrectRestrictionType(res)", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def decode(cls, encstr: str) -> Tuple['Restriction', str]:\n alts = []\n while len(encstr) != 0:\n if encstr.startswith('&'):\n encstr = encstr[1:]\n break\n alt, encstr = Alternative.decode(encstr)\n alts.append(alt)\n return cls(alts), encstr", "def eval_cast(string):\n\n return W.string_eval_expression(string, {}, {}, {})", "def string_with_double_quote_pattern_validate_regular_expression(cls, value):\n if value is None:\n return value\n\n if not re.match(r\"this is \\\"something\\\"\", value):\n raise ValueError(r\"must validate the regular expression /this is \\\"something\\\"/\")\n return value", "def quote(s):\n return unescape(quoteattr(s))", "def AsRegEx(self):\n parts = _REGEX_SPLIT_PATTERN.split(self._value)\n result = u\"\".join(self._ReplaceRegExPart(p) for p in parts)\n\n return rdf_standard.RegularExpression(u\"(?i)\\\\A%s\\\\Z\" % result)", "def from_string(self, regex_str: str):\n return RegexReader(regex_str)", "def normalise_string(string):\n # Disallow spaces\n if ' ' in string:\n raise PydmrsValueError('Predicates must not contain spaces')\n # Strip surrounding quotes and disallow other quotes\n if string[0] == '\"' and string[-1] == '\"':\n string = string[1:-1]\n if string[0] == \"'\":\n warn('Predicates with opening single-quote have been deprecated', PydmrsDeprecationWarning)\n string = string[1:]\n if '\"' in string:\n raise PydmrsValueError('Predicates must not contain quotes')\n # Force lower case\n if not 
string.islower():\n warn('Predicates must be lower-case', PydmrsWarning)\n string = string.lower()\n # Strip trailing '_rel'\n if string[-4:] == '_rel':\n string = string[:-4]\n \n return string", "def decode(cls, encstr: str) -> Tuple['Alternative', str]:\n cond = None\n end_off = 0\n\n # Swallow field up to conditiona\n while end_off < len(encstr):\n if encstr[end_off] in string.punctuation:\n cond = encstr[end_off]\n break\n end_off += 1\n if cond is None:\n raise ValueError('{} does not contain any operator'\n .format(encstr))\n field = encstr[:end_off]\n end_off += 1\n\n value = ''\n while end_off < len(encstr):\n if encstr[end_off] == '|':\n # We swallow this\n end_off += 1\n break\n if encstr[end_off] == '&':\n break\n if encstr[end_off] == '\\\\':\n end_off += 1\n value += encstr[end_off]\n end_off += 1\n\n return cls(field, cond, value), encstr[end_off:]", "def _MakeRE(regex_str):\n return re.compile(regex_str.format(**SHORTHAND))", "def reg_name(nstr:str) -> object :\r\n\r\n elements=nstr.split(\" \")\r\n combs=word_combination(elements)\r\n lregex=[]\r\n for comb in combs :\r\n if len(comb) > 1 :\r\n lregex.append(\"(?i:{})\".format('[\\.\\- _,;:]?'.join(comb))) #Here to change character seperation between the words\r\n elif len(comb) == 1 :\r\n lregex.append(\"(?i:{})\".format(comb))\r\n else :\r\n pass\r\n \r\n return re.compile('({})'.format(\"|\".join(lregex)))", "def safe_str(self, string):\n return self.db.escape_string(string)", "def from_string(string, _or=''):\n if _or:\n and_or = 'or'\n else:\n and_or = ''\n return Input(string, and_or=and_or)", "def _eval(s):\n l = []\n safechars = ('/', '+', '-', '*', '.', ')', '(')\n for c in s:\n if c.isdigit() or c in safechars:\n l.append(c)\n return eval(''.join(l))", "def test_parse_quotes_doublequote(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a (\\\") character\")):\n api.parse_quote(\" This is a quote\\\". | Author | Publication | tag1, tag2 , tag3 \",\n simple_format=False)", "def cry(s : str) -> CryptolTerm:\n return CryptolTerm(s)", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def parse_string_2(string):\n string = re.sub(r\"\\'\", \"\", string)\n return string.strip().lower()", "def validate_safe_string(value):\n # The following strings are explicitly allowed, despite having otherwise-illegal chars.\n legal_strings_with_special_chars = frozenset({'@rid', '@class', '@this', '%'})\n\n if not isinstance(value, six.string_types):\n raise TypeError(u'Expected string value, got: {} {}'.format(\n type(value).__name__, value))\n\n if not value:\n raise GraphQLCompilationError(u'Empty strings are not allowed!')\n\n if value[0] in string.digits:\n raise GraphQLCompilationError(u'String values cannot start with a digit: {}'.format(value))\n\n if not set(value).issubset(VARIABLE_ALLOWED_CHARS) and \\\n value not in legal_strings_with_special_chars:\n raise GraphQLCompilationError(u'Encountered illegal characters in string: {}'.format(value))", "def sanitise(string: str) -> str:\n return \"_\".join(re.findall(re.compile(\"[^ @&()/]+\"), string))", "def test_bug_652575():\n assert _do_test_raw(\"var x = 'capability.policy.';\").failed()" ]
[ "0.5168612", "0.5026671", "0.49126112", "0.48980483", "0.4877988", "0.485538", "0.48332566", "0.48306048", "0.4796996", "0.47644973", "0.4762547", "0.47521722", "0.4743869", "0.47309223", "0.47302634", "0.4718924", "0.47140232", "0.47117367", "0.4668038", "0.46525672", "0.46359733", "0.4625946", "0.4619906", "0.46132216", "0.46042398", "0.4586213", "0.4583964", "0.4576204", "0.45618618", "0.45545098" ]
0.6378927
0
Tests the restrictions against the values dict given. Normally values are treated as strings, but conditions only work if they're actually integers. Returns (True, '') if everything is good. Otherwise, returns (False, reasonstring)
def are_restrictions_met(self, values: Dict[str, Any]) -> Tuple[bool, str]:
    for r in self.restrictions:
        reasons = r.test(values)
        if reasons is not None:
            return False, reasons
    return True, ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self, values: Dict[str, Any]) -> Optional[str]:\n # This is always True\n if self.cond == '#':\n return None\n\n def why(cond, field, explanation) -> Optional[str]:\n if cond:\n return None\n return '{}: {}'.format(field, explanation)\n\n # If it's missing, it's only True if it's a missing test.\n if self.field not in values:\n # Default to ignoring id field as long as no version.\n if self.field == '':\n return why('-' not in self.value, 'id', 'unknown version {}'.format(self.value))\n return why(self.cond == '!', self.field, 'is missing')\n\n # If they supply a function, hand it to them.\n if callable(values[self.field]):\n return values[self.field](self)\n\n val = str(values[self.field])\n if self.cond == '!':\n return why(False, self.field, 'is present')\n elif self.cond == '=':\n return why(val == self.value,\n self.field,\n '!= {}'.format(self.value))\n elif self.cond == '/':\n return why(val != self.value,\n self.field,\n '= {}'.format(self.value))\n elif self.cond == '^':\n return why(val.startswith(self.value),\n self.field,\n 'does not start with {}'.format(self.value))\n elif self.cond == '$':\n return why(val.endswith(self.value),\n self.field,\n 'does not end with {}'.format(self.value))\n elif self.cond == '~':\n return why(self.value in val,\n self.field,\n 'does not contain {}'.format(self.value))\n elif self.cond == '<':\n try:\n actual_int = int(val)\n except ValueError:\n return why(False, self.field, \"not an integer field\")\n try:\n restriction_val = int(self.value)\n except ValueError:\n return why(False, self.field, \"not a valid integer\")\n return why(actual_int < restriction_val,\n self.field,\n \">= {}\".format(restriction_val))\n elif self.cond == '>':\n try:\n actual_int = int(val)\n except ValueError:\n return why(False, self.field, \"not an integer field\")\n try:\n restriction_val = int(self.value)\n except ValueError:\n return why(False, self.field, \"not a valid integer\")\n return why(actual_int > restriction_val,\n self.field,\n \"<= {}\".format(restriction_val))\n elif self.cond == '{':\n return why(val < self.value,\n self.field,\n 'is the same or ordered after {}'.format(self.value))\n elif self.cond == '}':\n return why(val > self.value,\n self.field,\n 'is the same or ordered before {}'.format(self.value))\n else:\n # We checked this in init!\n assert False", "def test(self, values: Dict[str, Any]) -> Optional[str]:\n reasons = []\n for alt in self.alternatives:\n reason = alt.test(values)\n if reason is None:\n return None\n reasons.append(reason)\n\n return \" AND \".join(reasons)", "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in 
str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def checkValue(c, m, y, k):\n MINVAL=0\n MAXVAL=255\n valueOk=True\n for val in c, m, y, k:\n if val >=MINVAL and val <=255:\n pass\n else:\n valueOk=False\n \n return valueOk", "def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True", "def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")", "def __allowed_values_inccorrect_string(self):\n strTestName = 'Values of a string (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n\n RxCSObject.paramAddMan('parameter3', 'string')\n RxCSObject.paramType('parameter3', str)\n RxCSObject.paramAllowed('parameter3', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 21\n RxCSObject.parameter3 = 'Allowed string #3'\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def sanitize_input(ll):\n p = sum([l[1] for l in ll])\n if not all([l[0] == int(l[0]) for l in ll]):\n if round(p, 5) != 1:\n return \"It's not a valid distribution and furthermore, one or more variable value are not integers\"\n else:\n return \"All the variable values should be integers\"\n if round(p, 5) != 1:\n return \"It's not a valid distribution\"", "def is_valid(self, value) -> 'True | str':\n err_str = super().is_valid()\n if isinstance(err_str, str):\n return err_str\n if value < self.min_val or value > self.max_val:\n return f'The value \"{value}\" must be in range <{self.min_val}, {self.max_val}>.'\n return True", "def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None", "def filter_cond(line_dict):\n if(line_dict[\"if1\"] == ''):\n return False\n cond_match = (\n (int(line_dict[\"if1\"]) > 20 and int(line_dict[\"if1\"]) < 40)\n ) \n return True if cond_match else False", "def check_value(self, value):", "def _check_values(self, values):\n mod = []\n for k, v in values.items():\n if isinstance(v, str) and \"'\" in v:\n mod.append(k)\n if len(mod) == 0:\n return values\n else:\n values = copy.copy(values)\n for k in mod:\n values[k] = values[k].replace(\"'\", \"''\")\n return values", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def 
validMisc(self, p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity):\n checks = 0;\n for val in [p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity]:\n if val is None:\n checks += 1;\n #check p_lowerRateLim\n if not (p_lowerRateLim is None):\n for valid in frange(30,50,5):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n for valid in frange(51,90,1):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n for valid in frange(95,175,5):\n if p_lowerRateLim == valid:\n checks += 1;\n break\n #check p_upperRateLim\n if not (p_upperRateLim is None):\n for valid in frange(50,175,5):\n if p_upperRateLim == valid: #and p_upperRateLim >= p_lowerRateLim: #need to implement\n checks += 1;\n break\n if not (p_modulationSensitivity is None):\n if p_modulationSensitivity > 0 and p_modulationSensitivity <= 16:\n if p_modulationSensitivity is int:\n checks += 1;\n if not (p_fixedAVDelay is None):\n for valid in frange(70,300,10):\n if valid == p_fixedAVDelay:\n checks += 1\n if checks == 4:\n return True\n return False", "def check_value(self, key: str, value: Any):\n # Check the value with a set of tests\n self._check_missing(key, value)\n self._check_allowed_values(key, value)\n self._check_data_type(key, value)\n self._check_value_range(key, value)", "def validate_strength(cls, value: str) -> (bool, dict):\n if value is None:\n return False, {}\n\n length = cls._validate_length(value)\n digit = cls._validate_digit(value)\n uppercase = cls._validate_uppercase(value)\n lowercase = cls._validate_lowercase(value)\n symbol = cls._validate_symbol(value)\n\n valid = all([length, digit, uppercase, lowercase, symbol])\n error_dict = {\n 'length': length,\n 'digit': digit,\n 'uppercase': uppercase,\n 'lowercase': lowercase,\n 'symbol': symbol,\n }\n\n return valid, error_dict", "def check_validity(self):\n try:\n if self.type == ConstraintTypes.EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.NOT_EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.WITHIN:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], type(self.value[1])), \"Invalid types.\"\n )\n enforce(\n isinstance(self.value[1], type(self.value[0])), \"Invalid types.\"\n )\n elif self.type == ConstraintTypes.IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if 
len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.NOT_IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.DISTANCE:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], Location),\n \"Invalid type, expected Location.\",\n )\n enforce(\n isinstance(self.value[1], float), \"Invalid type, expected Location.\"\n )\n else: # pragma: nocover\n raise ValueError(\"Type not recognized.\")\n except ValueError:\n return False # pragma: nocover\n\n return True", "def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)", "def __allowed_values_correct_string(self):\n strTestName = 'Values of a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'string')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramAllowed('parameter1', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 'Allowed string #2'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def a_list(test_val: object, test_col: object, valid_values: object) -> object:\n tv_upper = test_val.upper()\n rc: bool = True\n # noinspection PyTypeChecker\n value_list = [x[test_col] for x in valid_values]\n value_list_upper = [x.upper() for x in value_list]\n if tv_upper not in value_list_upper:\n print(f'{test_val} is invalid. 
Valid values are {str(value_list)}')\n rc = False\n return rc", "def validate_dict(types,val,allowed,typ):\n if not len(types): return TYPE_MISMATCH\n if str(type(val)) not in typ['list']: raise(Exception('unknown type'))\n for k,v in val.items():\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True", "def __allowed_values_inccorrect_number(self):\n strTestName = 'Values of a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 1.4\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])" ]
[ "0.67283416", "0.66373456", "0.63170063", "0.6068614", "0.60020655", "0.5946913", "0.57890636", "0.5788082", "0.57537967", "0.57021195", "0.56713057", "0.5626302", "0.5622262", "0.5609995", "0.5586753", "0.55860853", "0.5585117", "0.5569001", "0.5539537", "0.5532167", "0.5508965", "0.5501328", "0.5495712", "0.54921186", "0.54814786", "0.5481348", "0.5475239", "0.5473147", "0.54602677", "0.5422842" ]
0.76134247
0
All-in-one check that a runestring is valid, derives from this MasterRune, and passes all its conditions against the given dictionary of values or callables
def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
    try:
        rune = Rune.from_base64(b64str)
    except:  # noqa: E722
        return False, "runestring invalid"
    if not self.is_rune_authorized(rune):
        return False, "rune authcode invalid"
    return rune.are_restrictions_met(values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n return MasterRune(secret).check_with_reason(b64str, values)", "def test_any_rune(self):\n rule = 'alert (name:\"rune\"; regex:\".{64}\";)'\n\n tests = {\n \"A\"*64: [\"proxying connection from\", \"INFO : filter matched: 'rune'\"],\n \"\\x90\"*64: [\"proxying connection from\"],\n }\n\n self.run_rules(rule, tests, echo=False)", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def _valid_(s) :\n return LoKi.Dicts.MCFinderDicts.valid (s)", "def is_key_valid(self,key):\n if not key or any(map(lambda s: s in key,space_chars))\\\n or any(map(lambda s: s in key,bad_chars)):\n return False \n return True", "def Check_is_valid(self, String):\r\n\r\n if self.Special_Names.__contains__(String):\r\n return False\r\n elif self.Special_Names_no_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_one_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_two_Operands.__contains__(String):\r\n return False\r\n elif self.Data_types.__contains__(String):\r\n return False\r\n elif self.Registers.__contains__(String):\r\n return False\r\n elif self.Irvine32_functions.__contains__(String):\r\n return False\r\n elif String.__contains__('\"'):\r\n return False\r\n elif String.__contains__('\\''):\r\n return False\r\n elif String.__contains__('.'):\r\n return False\r\n elif String[0].isdecimal():\r\n return False\r\n if len(self.Data_variables) > 0:\r\n if self.Data_variables.__contains__(String):\r\n return False\r\n if len(self.Functions_names) > 0:\r\n if self.Functions_names.__contains__(String):\r\n return False\r\n if len(self.Labels_names) > 0:\r\n if self.Labels_names.__contains__(String):\r\n return False\r\n return True", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def test_should_raise_in_case_of_wrong_characters(self):\n validator = CharCombinationValidator()\n\n regex = re.compile(r'[\\(\\[\\{]\\)\\]\\}')\n forbidden_chars = regex.sub('', punctuation)\n for char in forbidden_chars:\n with self.assertRaises(FormulaValidationError):\n validator('Fe(O)2%s' % char)", "def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def validate(cls, tab_dict, raise_error=True):\r\n return key_checker(['type'])(tab_dict, raise_error)", "def validate(self, s):\n if len(s) == 0:\n return False\n if s in self.whitelist:\n return True\n if s in self.blacklist:\n return False\n\n # SQL Types are rarely used\n if 't' in s and 'f(t' not in s and 'At' not in s:\n return False\n\n if '1nf' in s:\n return False\n if 's1o' in s:\n return False\n if 'oo' in s:\n return False\n if 'v,s' in s:\n return False\n if 's,v' in s:\n return False\n if 'v,v' in s:\n return False\n if 'v,1' in s:\n return False\n if 'v,n' in s:\n return False\n if 'n,v' in s:\n return False\n if '1,v' in s:\n return False\n if 'Eo(' in s:\n 
return False\n if '(o(' in s:\n return False\n if '(o1' in s:\n return False\n if '(on' in s:\n return False\n if '(os' in s:\n return False\n if '(of' in s:\n return False\n if '(ov' in s:\n return False\n if 'B(n)' in s:\n return False\n if 'oso' in s:\n return False\n if 'o1o' in s:\n return False\n if 'ono' in s:\n return False\n\n # only 1 special case for this\n # 1;foo:goto foo\n # 1;n:k\n # the 'foo' can only be a 'n' type\n if ':' in s and not 'n:' in s:\n return False\n\n if '11' in s:\n return False\n\n if '))' in s:\n return False\n if '((' in s:\n return False\n if 'v1' in s:\n return False\n\n if 'nv' in s and ';T' not in s:\n return False\n if 'nn' in s and ';T' not in s:\n return False\n\n # select @version foo is legit\n # but unlikely anywhere else\n if 'vn' in s and 'Evn' not in s:\n return False\n\n if 'oE' in s:\n return False\n\n if 'A1' in s:\n return False\n if 'An' in s:\n return False\n if 'A(1' in s:\n return False\n\n if 'vov' in s:\n return False\n if 'vo1' in s:\n return False\n if 'von' in s:\n return False\n\n if 'ns' in s:\n if 'U' in s:\n return True\n if 'T' in s:\n return True\n return False\n\n if 'sn' in s:\n # that is... Tsn is ok\n if s.find('T') != -1 and s.find('T') < s.find('sn'):\n return True\n return False\n\n # select foo (as) bar is only nn type i know\n if 'nn' in s and 'Enn' not in s and ';T' not in s:\n return False\n\n if ',o' in s:\n return False\n\n if 'kk' in s and 'Tkk' not in s:\n return False\n\n if 'ss' in s:\n return False\n\n if 'ff' in s:\n return False\n\n if '1no' in s:\n return False\n\n if 'kno' in s:\n return False\n\n if 'nEk' in s:\n return False\n\n if 'n(n' in s:\n return False\n if '1so' in s:\n return False\n if '1s1' in s:\n return False\n if 'noo' in s:\n return False\n if 'ooo' in s:\n return False\n\n if 'vvv' in s:\n return False\n\n if '1vn' in s:\n return False\n if '1n1' in s:\n return False\n if '&1n' in s:\n return False\n if '&1v' in s:\n return False\n if '&1s' in s:\n return False\n if 'nnk' in s:\n return False\n if 'n1f' in s:\n return False\n # folded away\n if s.startswith('('):\n return False\n\n if '&o' in s:\n return False\n\n if '1,1' in s:\n return False\n if '1,s' in s:\n return False\n if '1,n' in s:\n return False\n if 's,1' in s:\n return False\n if 's,s' in s:\n return False\n if 's,n' in s:\n return False\n if 'n,1' in s:\n return False\n if 'n,s' in s:\n return False\n if 'n,n' in s:\n return False\n if '1o1' in s:\n return False\n if '1on' in s:\n return False\n if 'no1' in s:\n return False\n if 'non' in s:\n return False\n if '1(v' in s:\n return False\n if '1(n' in s:\n return False\n if '1(s' in s:\n return False\n if '1(1' in s:\n return False\n if 's(s' in s:\n return False\n if 's(n' in s:\n return False\n if 's(1' in s:\n return False\n if 's(v' in s:\n return False\n if 'v(s' in s:\n return False\n if 'v(n' in s:\n return False\n if 'v(1' in s:\n return False\n if 'v(v' in s:\n return False\n\n if s.startswith('n('):\n return False\n\n if s.startswith('vs'):\n return False\n\n if s.startswith('o'):\n return False\n\n if ')(' in s:\n return False\n\n # need to investigate T(vv) to see\n # if it's correct\n if 'vv' in s and s != 'T(vv)':\n return False\n\n # unlikely to be sqli but case FP\n if s in ('so1n)', 'sonoE'):\n return False\n\n return True", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, 
code=self.code)", "def condition_is_valid(self):\n cond = self.condition.lower()\n cond = re.sub('\\s+', ' ', cond)\n\n for ap in self.available_functions:\n ap = ap.lower()\n\n ret = re.search(ap, cond)\n if ret:\n # [('a', 'b'), ('a', 'b'), ...]\n self.used_functions[ap] = re.findall(ap, cond)\n cond = re.sub(ap, ' ', cond)\n\n # print self.used_functions\n for op in ['and', 'or', 'not']:\n cond = re.sub('\\s%s\\s' % op, ' ', cond)\n\n cond = re.sub('\\(', '', cond)\n cond = re.sub('\\)', '', cond)\n cond = re.sub('\\s+', '', cond)\n\n return len(cond) == 0", "def test_accept_letter_unmasked_masked(self):\n # Prepare test\n letter_a = 'a'\n letter_b = 'b'\n self.console.in_valid_letter = MagicMock(return_value=letter_b)\n self.console.word.is_masked.side_effect = [False, True, StopIteration]\n\n # Run test\n result = self.console.accept_letter(letter_a)\n\n # Evaluate test\n self.assertEqual(letter_b, result)", "def is_valid(self, string) -> bool:\n while '()' in string or '{}' in string or '[]' in string:\n string = string.replace('()', '').replace('[]', '').replace('{}', '')\n\n return len(string) == 0", "def test_accept_letter_masked(self):\n # Prepare test\n letter = 'a'\n self.console.in_valid_letter = MagicMock(return_value='b')\n self.console.word.is_masked.return_value = True\n\n # Run test\n result = self.console.accept_letter(letter)\n\n # Evaluate test\n self.assertEqual(letter, result)", "def _validate_string(self, path, value, value_is_key=False):\r\n value = re.sub('[/$#{}._|*=\\-]', ' ', value)\r\n\r\n tokens = nltk.tokenize.word_tokenize(value)\r\n for raw_token in tokens:\r\n if raw_token.startswith(\"'\"):\r\n raw_token = raw_token[1:]\r\n if self.corpus.validate_token(raw_token):\r\n continue\r\n sub_tokens = Validator.camel_case_split(raw_token)\r\n ret = True\r\n for sub_token in sub_tokens:\r\n ret = ret and self.corpus.validate_token(sub_token)\r\n\r\n if not ret:\r\n self.errors.append({\r\n \"isKey\": value_is_key,\r\n \"path\": path,\r\n \"typo\": raw_token,\r\n })", "def _security_check_parameters(param_dict):\n for key, value in param_dict.iteritems():\n str_value = str(value) # Could easily be an int or a float\n for bad_str in [\";\", \"&&\", \">\", \"<\", \"|\"]:\n if bad_str in str_value:\n raise ValueError(\"Rejecting suspicious argument for %s\" % key)", "def test_valid_str(self):\n try:\n lowercase_validator('hg213i75%^&$efg')\n except ValidationError:\n self.fail('String raised ValidationError unexpectedly')", "def check_character(char, name, parameters):\r\n if char in name:\r\n raise NameError('Invalid character in the variable name: ' + name)\r\n\r\n # Make sure people don't include # within the name of parameters\r\n for item in parameters.keys():\r\n if char in item:\r\n raise NameError('Invalid character in the variable parameters: ' + item)", "def should_run(self, case: Tuple[Dict[str, Any], ...]) -> bool:\n return True", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def __allowed_values_correct_string(self):\n strTestName = 'Values of a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'string')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramAllowed('parameter1', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 'Allowed string #2'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def valid(f):\n try:\n return not 
re.search(r'\\b0[0-9]', f) and eval(f) is True\n # \"\\b\" is a word boundary\n \"\"\"We need to exclude numbers starting with zero,\n as these are interpretted as base8 (octal). This in\n turn could cause interpretation errors, and exceptions\n (for example 09 is not octal and will throw and exception)\"\"\"\n except (ArithmeticError, SyntaxError):\n return False", "def check_redditor(self, args):\n\n for user in args.redditor:\n if any(char.isalpha() for char in user[1]) \\\n or self._illegal_chars.search(user[1]) != None \\\n or int(user[1]) == 0:\n raise ValueError", "def test_valid_alpha(alpha: Any) -> None:\n check_alpha(alpha=alpha)", "def test_allowed_string(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': 'female'}\n self.assertFalse(val.validate(document))", "def validate_strength(cls, value: str) -> (bool, dict):\n if value is None:\n return False, {}\n\n length = cls._validate_length(value)\n digit = cls._validate_digit(value)\n uppercase = cls._validate_uppercase(value)\n lowercase = cls._validate_lowercase(value)\n symbol = cls._validate_symbol(value)\n\n valid = all([length, digit, uppercase, lowercase, symbol])\n error_dict = {\n 'length': length,\n 'digit': digit,\n 'uppercase': uppercase,\n 'lowercase': lowercase,\n 'symbol': symbol,\n }\n\n return valid, error_dict" ]
[ "0.6080785", "0.5750079", "0.5702773", "0.5655834", "0.54935133", "0.5481613", "0.5304172", "0.52878016", "0.5247526", "0.5234183", "0.523374", "0.52119046", "0.5198196", "0.51774484", "0.5176261", "0.517611", "0.51729107", "0.5167192", "0.51573974", "0.51418495", "0.5130349", "0.5119607", "0.50747323", "0.5071726", "0.50523967", "0.50478077", "0.5045036", "0.5007374", "0.50046164", "0.5002741" ]
0.64300054
0
Convenience function to check that the b64str runestring is valid, derives from our secret, and passes against these values. If you want to check many runes, it's more efficient to create the MasterRune first and then check them, but this is fine if you're only checking one.
def check_with_reason(secret: bytes, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:
    return MasterRune(secret).check_with_reason(b64str, values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_with_reason(self, b64str: str, values: Dict[str, Any]) -> Tuple[bool, str]:\n try:\n rune = Rune.from_base64(b64str)\n except: # noqa: E722\n return False, \"runestring invalid\"\n if not self.is_rune_authorized(rune):\n return False, \"rune authcode invalid\"\n return rune.are_restrictions_met(values)", "def check(secret: bytes, b64str: str, values: Dict[str, Any]) -> bool:\n return check_with_reason(secret, b64str, values)[0]", "def is_seed_valid(seed):\n if seed == \"0\":\n return True\n\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def is_rune_authorized(self, other: Rune) -> bool:\n # Make copy, as we're going to update state.\n sha = self.shabase.copy()\n totlen = self.seclen\n for r in other.restrictions:\n pad = end_shastream(totlen)\n sha.update(pad)\n totlen += len(pad)\n enc = bytes(r.encode(), encoding='utf8')\n sha.update(enc)\n totlen += len(enc)\n\n return other.authcode() == sha.digest()", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def validate_admin (admin_secret):\n\n try:\n admin_secret = admin_secret.encode()\n hashed = app.config['ADMIN_SECRET'].encode()\n return bcrypt.checkpw(admin_secret, hashed)\n\n except Exception as e:\n return False", "def is_right_secret(self, secret):\n dct = []\n for digit in secret:\n if digit in dct:\n return 0\n else:\n dct.append(digit)\n return True", "def validate_db_admin (db_secret):\n\n try:\n db_secret = db_secret.encode()\n hashed = app.config['DB_SECRET'].encode()\n return bcrypt.checkpw(db_secret, hashed)\n except Exception as e:\n return False", "def test_dna_validator(self):\n \n dna = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test invalid characters\n invalid_dna1 = 'EETGGAGACGGAAACASTCCGAGGACATCCGGAGGAACCCGGGGAGTZVTHHCTGAGTGGTAAT'\n # test invalid length\n invalid_dna2 = 'GGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test for invalid internal stop\n invalid_dna3 = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTTGAGTGGTAATC'\n expected_validationT = True\n expected_validationF = False\n result_validation1 = dna_validator(dna)\n self.assertEqual(result_validation1, expected_validationT)\n result_validation2 = dna_validator(invalid_dna1)\n self.assertEqual(result_validation2, expected_validationF)\n result_validation3 = dna_validator(invalid_dna2)\n self.assertEqual(result_validation3, expected_validationF)\n result_validation4 = dna_validator(invalid_dna3)\n self.assertEqual(result_validation4, expected_validationF)", "def mnemonic_is_valid(mnemonic: str, wordlist=WORDLIST):\n try:\n mnemonic_to_bytes(mnemonic, wordlist=wordlist)\n return True\n except Exception as e:\n return False", "def ValidateEntry(Secret_Word_Masked, Secret_Word_Masked_Unspaced, Used_Char):\n Guess = input(\"\\nGuess A Letter: \")\n Guess = Guess.lower()\n\n while (len(Guess) > 1 or not Guess.isalpha()):\n print(\"\\nInvalid Entry: \\'%s\\'\" 
%Guess)\n print(\"Alphabetic Character(s) Already Used: %s\" %Used_Char)\n print(\"So Far The Secret Word is:\\n%s\" %Secret_Word_Masked)\n Guess = input(\"\\nPlease Enter Only A Single Alphabetic Character: \")\n Guess = Guess.lower()\n\n print(\"\\nValid Entry: \\'%c\\'\" %Guess)\n\n return Guess", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def test_accept_letter_unmasked_masked(self):\n # Prepare test\n letter_a = 'a'\n letter_b = 'b'\n self.console.in_valid_letter = MagicMock(return_value=letter_b)\n self.console.word.is_masked.side_effect = [False, True, StopIteration]\n\n # Run test\n result = self.console.accept_letter(letter_a)\n\n # Evaluate test\n self.assertEqual(letter_b, result)", "def test_accept_letter_masked(self):\n # Prepare test\n letter = 'a'\n self.console.in_valid_letter = MagicMock(return_value='b')\n self.console.word.is_masked.return_value = True\n\n # Run test\n result = self.console.accept_letter(letter)\n\n # Evaluate test\n self.assertEqual(letter, result)", "def is_secret_string(value):\n if not isinstance(value, basestring):\n return False\n return bool(_secret_string_pattern.match(value))", "def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))", "def check_redditor(self, args):\n\n for user in args.redditor:\n if any(char.isalpha() for char in user[1]) \\\n or self._illegal_chars.search(user[1]) != None \\\n or int(user[1]) == 0:\n raise ValueError", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def verify_pwd_str(provided_password: str, stored_hash: str) -> bool:\n salt = stored_hash[:64].encode('ascii')\n stored_password = stored_hash[64:]\n provided_password = provided_password.encode('utf-8')\n pwdhash = hashlib.pbkdf2_hmac('sha256', provided_password, salt, 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def test_different_seeds(self):\n\n test_string = \"just a string\"\n\n funcs = [\n CityHash64WithSeed,\n CityHash64WithSeeds,\n CityHash128WithSeed,\n ]\n\n for func in funcs:\n self.assertNotEqual(func(test_string, 0), func(test_string, 1))", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def isValid(text):\n return bool(re.search(r'\\R2D2\\b', text, re.IGNORECASE))", "def self_check() -> None:\n assert len(ZBASE32_ALPHABET) == 32\n\n # Test vector from https://github.com/matusf/z-base-32/blob/0.1.2/src/lib.rs\n assert zbase32_encode(b\"asdasd\") == \"cf3seamuco\"\n assert zbase32_decode(\"cf3seamuco\") == b\"asdasd\"\n\n # Test vector from https://www.uriports.com/blog/setting-up-openpgp-web-key-directory/\n # assert zbase32_encode(hashlib.sha1(b\"yourmail\").digest()) == 
\"hacabazoakmnagxwmkjerb9yehuwehbm\"\n # -> this hash is wrong, and I don't know what username gives the SHA1\n # e61980e2f0c2962c19f45a928207e0472744702b\n\n # Test vector from https://metacode.biz/openpgp/web-key-directory\n assert zbase32_encode(hashlib.sha1(b\"test-wkd\").digest()) == \"4hg7tescnttreaouu4z1izeuuyibwww1\"\n\n # Test vector from https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/\n assert (\n get_wkd_advanced_url(\"[email protected]\")\n == \"https://openpgpkey.example.org/.well-known/openpgpkey/example.org/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe\" # noqa\n )\n assert (\n get_wkd_direct_url(\"[email protected]\")\n == \"https://example.org/.well-known/openpgpkey/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe\"\n )\n\n # Test vector from https://wiki.gnupg.org/WKD\n assert (\n get_wkd_direct_url(\"[email protected]\")\n == \"https://intevation.de/.well-known/openpgpkey/hu/it5sewh54rxz33fwmr8u6dy4bbz8itz4?l=bernhard.reiter\"\n )", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def test_correct_barcode(self):\r\n original = 'ATTTTTTTTTCG'\r\n recieved = 'ATTTTTTTTTTT'\r\n possibilities = ['TGTATTCGTGTA', 'ATTTTTTTTTCG', 'TGTAGGCGTGTA',\r\n 'TGTAGAAGTGTA', 'TGTAGGCGTATA', 'TGTAAAAAAAAA']\r\n decoded, num_errors = barcode.correct_barcode(recieved, possibilities)\r\n self.assertEqual(decoded, original)\r\n self.assertEqual(num_errors, 2)" ]
[ "0.67954326", "0.6076462", "0.56105393", "0.55704355", "0.5488138", "0.54298264", "0.5401579", "0.5401579", "0.5336335", "0.5291665", "0.5288441", "0.5265383", "0.522915", "0.5190011", "0.51790816", "0.51719004", "0.51499397", "0.51450264", "0.51145905", "0.50659394", "0.50631905", "0.5052787", "0.50197434", "0.5016467", "0.5003911", "0.49980125", "0.4967239", "0.49660823", "0.49484655", "0.4921297" ]
0.7233146
0
Parse read and quality strings from a FASTQ file with sequencing reads.
def readFastq(filename): sequences = [] qualities = [] with open(filename) as fh: while True: fh.readline() # skip name line seq = fh.readline().rstrip() #read base sequence fh.readline() # skip placeholder line qual = fh.readline().rstrip() # base quality line if len(seq) == 0: break sequences.append(seq) qualities.append(qual) return sequences, qualities
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFastq(filename):\n\tsequences = []\n\tqualities = []\n\twith open(filename, 'r') as f:\n\t\twhile True: \n\t\t\tf.readline() # skip name line\n\t\t\tseq = f.readline().rstrip()\n\t\t\tf.readline() # skip place holder line \n\t\t\tq = f.readline().rstrip()\n\t\t\tif len(seq) ==0:\n\t\t\t\tbreak \n\t\t\tsequences.append(seq)\n\t\t\tqualities.append(q)\n\treturn sequences, qualities", "def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)", "def _parse_fastq(f):\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()", "def read_seqs(f):\n while True:\n # Read the sequence ID. 
If there's nothing to read, then we're done.\n try:\n seq_id = read_line(f)\n except EOFError:\n return\n\n # If we successfully read a sequence ID, then running out of stuff to\n # read means a truncated record.\n try:\n seq = str_to_byte_array(read_line(f))\n qual_id = read_line(f)\n qual = str_to_byte_array(read_line(f))\n except EOFError:\n raise EOFError('EOF while reading sequence.')\n\n # Some simple checks of the data.\n if seq_id[0] != '@':\n raise ValueError(\"Sequence ID doesn't begin with '@'.\")\n if qual_id[0] != '+':\n raise ValueError(\"Quality ID doesn't begin with '+'.\")\n if len(seq) != len(qual):\n raise ValueError(\"Sequence and quality are different lengths.\")\n\n yield (seq_id, seq, qual_id, qual)", "def process_fastq(fastq_file):\n current_record = {}\n\n for name, seq, blank, quality in zip(*[iter(fastq_file)]*4):\n current_record['name'] = name.strip('\\n')\n current_record['seq'] = seq.strip('\\n')\n current_record['quality'] = quality.strip('\\n')\n\n yield current_record", "def readfq(fp): # this is a generator function\n last = None # this is a buffer keeping the last unprocessed line\n while True: # mimic closure; is it a bad idea?\n if not last: # the first record or a record following a fastq\n for l in fp: # search for the start of the next record\n if l[0] in '>@': # fasta/q header line\n last = l[:-1] # save this line\n break\n if not last: break\n name, seqs, last = last[1:].partition(\" \")[0], [], None\n for l in fp: # read the sequence\n if l[0] in '@+>':\n last = l[:-1]\n break\n seqs.append(l[:-1])\n if not last or last[0] != '+': # this is a fasta record\n yield name, ''.join(seqs), None # yield a fasta record\n if not last: break\n else: # this is a fastq record\n seq, leng, seqs = ''.join(seqs), 0, []\n for l in fp: # read the quality\n seqs.append(l[:-1])\n leng += len(l) - 1\n if leng >= len(seq): # have read enough quality\n last = None\n yield name, seq, ''.join(seqs); # yield a fastq record\n break\n if last: # reach EOF before reading enough quality\n yield name, seq, None # yield a fasta record instead\n break", "def read_fastq(filename, strip_second_header=True):\n\n with open(filename) as fastq:\n line = fastq.readline()\n if not line.startswith(\"@\"):\n raise IOError(\"Not FASTQ format? 
First line didn't start with @\")\n while fastq:\n if line.startswith(\"@\"):\n header = line.rstrip()\n seq = fastq.readline().rstrip()\n second_header = fastq.readline()\n if strip_second_header:\n second_header = \"+\"\n scores = fastq.readline().rstrip()\n yield header, seq, second_header, scores\n elif line == \"\": # EOF\n yield header, seq, second_header, scores\n break\n line = fastq.readline()", "def read(infile):\n if isinstance(infile, str):\n infile = open(infile)\n\n with infile:\n while True:\n cmt = infile.readline().strip()\n seq = infile.readline().strip()\n plus = infile.readline().strip()\n qual = infile.readline().strip()\n\n if not cmt:\n break\n if not cmt.startswith('@') or plus != '+':\n raise ValueError('fastq file <{}> is corrupted'.format(infile.path))\n yield SRecord(cmt=cmt[1:], seq=seq, qual=qual)", "def fastq_reader(fastq):\n group_gen = grouper(fastq, 4)\n for record in group_gen:\n # drop the @ before the name and any text after a whitespace\n name = record[0].split(' ')[0][1:].strip()\n seq = record[1].strip()\n yield name, seq", "def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)", "def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences", "def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))", "def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . 
ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)", "def read_sequence(filename):\n record = next(SeqIO.parse(filename, \"fasta\"))\n return record.description, str(record.seq)", "def parse_multifasta_file(file, number_of_fastas):\n\n with open(file) as file:\n for i in range(number_of_fastas):\n fasts_seq = ''\n fasta_name = file.readline().strip()[1:]\n end_of_file = False\n end_of_seq = False\n while not end_of_seq and not end_of_file:\n x = file.tell()\n seq = file.readline()\n if not seq:\n end_of_file = True\n elif '>' not in seq:\n fasts_seq = fasts_seq + seq\n else:\n file.seek(x)\n end_of_seq = True\n fasts_seq = re.sub(r'\\n', '', fasts_seq)\n yield fasta_name, fasts_seq", "def FastqIterator(fh):\n def readTotitle(fh, titleChar):\n \"\"\"returns a tuple ([lines before the next title line], next tile line)\n \"\"\"\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith(titleChar):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)\n\n if type(fh) in StringTypes:\n fh = file(fh)\n\n preLines,nextTitleLine =readTotitle(fh,'@')\n\n while nextTitleLine != None:\n seqTitle = nextTitleLine[1:].rstrip()\n preLines,nextTitleLine=readTotitle(fh,'+')\n qualTitle = nextTitleLine[1:].rstrip()\n if len(qualTitle.strip()) > 0 and seqTitle != qualTitle:\n print seqTitle\n print preLines\n print qualTitle\n raise hmmErrors.InvalidFastq, \"Error in parsing: @title sequence entry must be immediately followed by corresponding +title quality entry.\"\n seqLines = preLines\n qualLines = []\n for i in range(len(seqLines)): # Quality characters should be the same length as the sequence\n qualLines.append( fh.readline().strip() )\n\n preLines,nextTitleLine=readTotitle(fh,'@')\n\n yield (seqTitle, ''.join(seqLines), ''.join(qualLines))", "def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta", "def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = 
SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),", "def readFasta(self, fastaFile):\t\n\t\tname, seq = None, []\n\t\tfor line in fastaFile:\n\t\t\tline = line.rstrip()\n\t\t\tif (line.startswith(\">\")):\n\t\t\t\tif name: yield (name, ''.join(seq))\n\t\t\t\tname, seq = line, []\n\t\t\telse:\n\t\t\t\tseq.append(line)\n\t\tif name: yield (name, ''.join(seq))", "def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence", "def test_parse_fasta_file(self):\r\n\r\n fasta_data = ['>seq1 SAMPLE1', 'AAACGT', '>seq2', 'ACGGT']\r\n\r\n expected_fasta = {'seq1': 'AAACGT', 'seq2': 'ACGGT'}\r\n\r\n expected_order = ['seq1 SAMPLE1', 'seq2']\r\n\r\n actual_fasta, actual_order = parse_fasta_file(fasta_data)\r\n\r\n self.assertEqual(actual_fasta, expected_fasta)\r\n\r\n self.assertEqual(actual_order, expected_order)", "def main(fileToCheck, minLength=-1, maxLength=-1):\n\n # Initialise variables.\n lineCount = 1 # The number of the line being examined. Used for displaying error messages.\n protDescription = True # Whether or not we are currently expecting a line starting with >.\n firstLine = True # Whether or not we are currently examining the first line of the file.\n proteinsInFile = {} # A dictionary indexed by the protein description line of the FASTA file.\n # The value of each entry is the correctly formatted protein sequence corresponding to the index.\n\n # Strip off all excess whitespace, and split the string into the individual lines of the file.\n checking = fileToCheck.rstrip()\n checking = checking.lstrip()\n checking = checking.split('\\n')\n for line in checking:\n line = line.rstrip()\n if firstLine:\n # True if we have just started parsing the file string, and haven;t yet examined any lines.\n if line[0] == '>':\n currentProt = line # Record the description line of the protein which is about to have its sequence inspected.\n currentSeq = '' # Initialise the sequence of the protein.\n protDescription = False # We are now expecting a protein sequence, not a protein description.\n firstLine = False\n else:\n # The first line of the file MUST be a protein description line (i.e. start with '>'). If the line was not\n # the beginning of a protein record, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n return 1, errorMessage\n elif protDescription:\n # This is true only if a line beginning with a '>' is expected.\n if line[0] == '>':\n # Expected a protein description line, and found a protein description line. This means that the entire sequence\n # of the currentProt protein has been found (i.e. we have finished inspecting the sequence of a protein, and\n # have found the protein to be valid). 
Now determine if the length of the sequence is within the user\n # specified bounds.\n if minLength == -1:\n if maxLength == -1:\n # If there are no restrictions on the protein sequence length, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n # If there is no minimum length restriction, and the protein sequence is not longer than the maximum\n # sequence length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) >= minLength:\n if maxLength == -1:\n # If there is no maximum length restriction, and the protein sequence is not shorter than the minimum\n # sequence length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n # If the protein sequence is not shorter than the minimum sequence length permitted and not longer\n # than the maximum length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n currentProt = line # Record the description line of the protein which is about to have its sequence inspected.\n currentSeq = '' # Initialise the sequence of the protein.\n protDescription = False # We are now expecting a protein sequence, not a protein description.\n else:\n # If the line does not begin with a '>', and it is expected to, it is possible that the amino acid sequence\n # is split over multiple lines.\n if line.isalpha():\n # If every character on the line is a letter, then the line contains a valid portion of the sequence.\n # Add the uppercase version of the sequence portion to the sequence currently being recorded.\n currentSeq += line.upper()\n else:\n # If the line did not contain only letters, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n return 1, errorMessage\n else:\n # If an amino acid sequence is expected.\n if line.isalpha():\n # If the line is all alphabetic characters, write the line out and indicate that we are expecting a\n # protein description line next (i.e. 
one beginning with a '>').\n currentSeq += line.upper()\n protDescription = True\n else:\n # If the line did not contain only letters, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to contain only letters, but instead got: \" + line\n return 2, errorMessage\n\n lineCount += 1\n\n # Catch the final protein from the file, and determine whether it should be recorded.\n if minLength == -1:\n if maxLength == -1:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) >= minLength:\n if maxLength == -1:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n proteinsInFile[currentProt] = currentSeq\n\n if len(proteinsInFile.keys()) < 2:\n # There are too few protein sequences entered\n errorMessage = (\"Not enough unique protein sequences have been entered.\" +\n \" This is possibly caused by not enough sequences of the required minimum and maximum length being provided.\"\n )\n return 3, errorMessage\n elif protDescription:\n # Return an indication that the FASTA file is correctly formatted.\n outputString = ''\n for i in proteinsInFile.keys():\n outputString += i + '\\n' + proteinsInFile[i] + '\\n'\n return 0, outputString[:-1]\n else:\n # The file did not end with a protein sequence.\n errorMessage = \"Reached the end of the file, but no protein sequence found for the final protein.\"\n return 3, errorMessage", "def readFASTA(filename, alpha = None, string_only = False):\n seqlist = []\n seqname = None\n seqinfo = None\n seqdata = []\n fh = open(filename)\n thisline = fh.readline()\n while (thisline):\n if (thisline[0] == '>'): # new sequence\n if (seqname): # take care of the data that is already in the buffer before processing the new sequence\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \"+seqname+\" is invalid (ignored): \", e, file=sys.stderr)\n seqinfo = thisline[1:-1] # everything on the defline is \"info\"\n seqname = seqinfo.split()[0] # up to first space\n seqdata = []\n else: # pull out the sequence data\n cleanline = thisline.split()\n for line in cleanline:\n seqdata.extend(tuple(line.strip('*'))) # sometimes a line ends with an asterisk in FASTA files\n thisline = fh.readline()\n\n if (seqname):\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \" + seqname + \" is invalid (ignored): \", e, file=sys.stderr)\n else:\n raise RuntimeError(\"No sequences on FASTA format found in this file\")\n fh.close()\n return seqlist", "def read_fasta(fp):\n name, seq = None, []\n for line in fp:\n line = line.rstrip()\n if line.startswith(\">\"):\n if name: yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name: yield (name, ''.join(seq))", "def iseq_to_qseq_fields(line, barcode_in_header,\r\n barcode_length, barcode_qual_c='b'):\r\n record = line.strip().split(':')\r\n rec_0_1, rec_0_2 = record[0].split('_')\r\n rec_4_1, rec_4_23 = record[4].split('#')\r\n rec_4_2, rec_4_3 = rec_4_23.split('/')\r\n if barcode_in_header:\r\n barcode = rec_4_2[:barcode_length]\r\n sequence = record[5]\r\n barcode_qual = barcode_qual_c * barcode_length\r\n sequence_qual = record[6]\r\n else:\r\n barcode = record[5][:barcode_length]\r\n sequence = 
record[5][barcode_length:]\r\n barcode_qual = record[6][:barcode_length]\r\n sequence_qual = record[6][barcode_length:]\r\n return (rec_0_1, rec_0_2, record[1], record[2], record[3],\r\n rec_4_1, rec_4_2, rec_4_3), sequence, sequence_qual,\\\r\n barcode, barcode_qual", "def parse_joined_fastq(path: Path, counts: Mapping[str, int]) -> Generator[SeqRecord, None, None]:\n sequence_id_map = dict()\n\n for record in SeqIO.parse(path, format=\"fastq\"):\n try:\n sequence_id = sequence_id_map[str(record.seq)]\n except KeyError:\n sequence_id = f\"read_len_{len(sequence_id_map) + 1}\"\n sequence_id_map[str(record.seq)] = sequence_id\n\n yield SeqRecord(record.seq, id=sequence_id)\n\n counts[sequence_id] += 1", "def readSeq(seqFile):\n line = seqFile.readline()\n seq1 = line.rstrip()\n line = seqFile.readline()\n seq2 = line.rstrip()\n return (seq1, seq2)", "def parse_fastq (rec_lines):\n data = []\n data.append(rec_lines[0][1:])\n data.append(rec_lines[1])\n data.append(rec_lines[3])\n return data", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")" ]
[ "0.72407734", "0.7033901", "0.69433504", "0.67568797", "0.6737888", "0.66820616", "0.65858674", "0.653524", "0.652288", "0.6501695", "0.6497522", "0.64846104", "0.64839154", "0.646596", "0.63651484", "0.63603467", "0.63447005", "0.6319669", "0.63027084", "0.6278591", "0.6254532", "0.623839", "0.62380093", "0.62327605", "0.62280685", "0.62178034", "0.6195038", "0.61865884", "0.61806303", "0.6160223" ]
0.74991745
0
Create a hash map between kmers and reads.
def kmerHashMap(reads, k): kmers_dict = {} # loop through all reads for i in range(len(reads)): # loop read's bases, except for the last k, to obtain its kmers for j in range(1+len(reads[i])-k): kmer = reads[i][j:k+j] if kmers_dict.has_key(kmer): kmers_dict[kmer].add(i) else: kmers_dict[kmer] = set([i]) return kmers_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)", "def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def get_results_for_init(self):\n return dict(init=self.centroids, n_clusters=self.centroids.shape[0])", "def build_map(model: str, n: int, kwc: int) -> Map:\n PKWS.clear()\n fited = cluster(n, model)\n return Map(\n cats=list(map(\"c-{}\".format, range(1, n + 1))),\n kws=list(\n map(\n lambda c: \", \".join(\n map(\n lambda x: x[0],\n count_it(\n Counter(\n chain.from_iterable(\n map(\n lambda ie: model == \"bert\"\n and SS_BERT.get(YS[model][ie[0]], [])\n or model == \"glove\"\n and SS_TFIDF[ie[0]]\n or SS_GLOVE[ie[0]],\n filter(\n lambda ie: ie[1] == c,\n enumerate(fited),\n ),\n ),\n )\n ),\n kwc,\n ),\n )\n ),\n range(n),\n )\n ),\n points=list(\n map(\n lambda y, x_y, x: Point(\n question=y, x=x_y[0], y=x_y[1], catagory=x,\n ),\n YS[model],\n XY[model],\n fited,\n )\n ),\n )", "def dict() -> Dict[str, Pin]:", "def ewriters():\n return dict(_ewriters)", "def hashdict(self):\n return {\n 'pix': super(rmap, self).hashdict(),\n 'map': hashlib.sha1(self.map.view(np.uint8)).hexdigest()\n }", "def new(num_buckets=256):\n aMap=[]", "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def create_std_map():\n data = extract_data()\n data = data['Students']\n global std_map\n for item in data:\n std = Student(item['Rollno'])\n std.cached_data(item['Name'],item['Gender'],item['Sgpa'] \\\n ,item['Cgpa'],item['Points'],item['Rank'],item['G_rank'])\n std_map[item['Rollno']] = std", "def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)", "def Dictionary_create(nMarkers, markerSize):\n pass", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def __init__(self):\n self.map = defaultdict(list)", "def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # 
Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts", "def readKerning(self):\n\t\tdata = self._fileSystem.readKerning()\n\t\tif data is None:\n\t\t\treturn\n\t\tkerning = {}\n\t\tfor side1 in data:\n\t\t\tfor side2 in data[side1]:\n\t\t\t\tvalue = data[side1][side2]\n\t\t\t\tkerning[side1, side2] = value\n\t\treturn kerning", "def get_speakers_map(self):\n speakers = {}\n for clu in self:\n speakers[clu] = self[clu].get_speaker()\n return speakers", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def construct_ngrams_dict(ngrams_list):\n counts = {}\n\n for t in ngrams_list:\n key = hash_function(t)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n return counts", "def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. 
\\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def finalize_readings(self):\n finalized_readings = {}\n for reading in self.intermediate_readings:\n finalized_readings[reading] = list(\n self.intermediate_readings[reading].values()\n )\n return finalized_readings", "def custom_dictionary(nMarkers, markerSize):\n pass", "def create_data():\n # Locations\n data = {}\n num_vehicles = 20\n depot = 0\n locations = loc1\n demands = popn\n\n num_locations = len(locations)\n dist_matrix = {}\n\n for from_node in range(0,num_locations):\n dist_matrix[from_node] = {}\n\n for to_node in range(0,num_locations):\n dist_matrix[from_node][to_node] = (\n haversine(\n locations[from_node],[to_node])\n #locations[to_node],[from_node])\n \"\"\"\n data[\"distances\"] =dist_matrix\n data[\"num_locations\"] = len(dist_matrix)\n data[\"num_vehicles\"] = 6\n data[\"depot\"] = 0\n data[\"demands\"] = demands\n #data[\"vehicle_capacities\"] = capacities\n data[\"time_per_demand_unit\"] = 0.05\n return data\n \"\"\"\n return [ num_vehicles, depot, locations, dist_matrix]", "def get_cache(self):\n cache = {}\n for i in range(5):\n cache['%02d' % i] = {\n Arrays.OFFSETS: self._get_array_value(Arrays.OFFSETS)[i],\n Arrays.SCALES: self._get_array_value(Arrays.SCALES)[i]\n }\n return cache", "def qiskit_circuit_measurement_map(c: QiskitCircuit) -> Dict[int, int]:\n measurements = [x for x in c.data if x[0].name == 'measure']\n return {\n c.qubits.index(x[1][0]): c.clbits.index(x[2][0])\n for x in measurements\n }", "def new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def shards(self):\n shards_per_node = {}\n for node in self.nodes:\n num_shards = 0\n metrics = self.metrics(node)\n for family in metrics:\n for sample in family.samples:\n if sample.name == \"vectorized_reactor_utilization\":\n num_shards = max(num_shards,\n int(sample.labels[\"shard\"]))\n assert num_shards > 0\n shards_per_node[self.idx(node)] = num_shards\n return shards_per_node", "def get_term_map(self):\n\t\tterm_map = {}\n\t\tfor i, ranking in enumerate(self.get_descriptors(self.top_terms)):\n\t\t\tfor term in ranking:\n\t\t\t\tif not term in term_map:\n\t\t\t\t\tterm_map[term] = [ i ]\n\t\t\t\telse:\n\t\t\t\t\tterm_map[term].append(i)\n\t\treturn term_map" ]
[ "0.59770435", "0.5825306", "0.5625832", "0.5522428", "0.5472376", "0.5450063", "0.54420954", "0.5432603", "0.5386902", "0.5372055", "0.5325172", "0.5304393", "0.53000337", "0.5292728", "0.5285071", "0.5272437", "0.52629334", "0.5260887", "0.5257762", "0.5255245", "0.5242287", "0.52381957", "0.52374804", "0.52265465", "0.5225795", "0.52172726", "0.5212587", "0.521168", "0.5209874", "0.520263" ]
0.7121498
0
Calculate the squared L2 norm of the pattern.
def l2_norm(pattern): return np.linalg.norm(pattern)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def squared_norm(self) -> float:\n return self.__real**2 + self.__img[0]**2 + self.__img[1]**2 + self.__img[2]**2", "def norm_l2(v):\n return np.sqrt((v**2).sum())", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n\n return output", "def norm(self):\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)", "def l2(vec):\n return np.linalg.norm(vec)", "def L2norm(self, array):\n norm = torch.sqrt(torch.sum(array * array))\n return norm", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def norm(self):\n\t\treturn math.sqrt(self.norm2())", "def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()", "def norm2(point):\n return np.sum(point**2, -1)", "def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)", "def l2norm_(X, Xstar):\n return cdist(X, Xstar)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def norm(self):\n\t\treturn np.sqrt(self.normSq())", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def normsq(self):\n return sum(x**2 for x in self.data)", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def norm(self):\n return sqrt(self.dot(self))" ]
[ "0.7689054", "0.7469105", "0.7411847", "0.7393104", "0.73873013", "0.73019034", "0.7256206", "0.72420824", "0.71961755", "0.7140405", "0.71325195", "0.71182483", "0.71150076", "0.7069896", "0.7034822", "0.7023806", "0.7019462", "0.69990593", "0.69920164", "0.69680566", "0.6951413", "0.69096494", "0.6767838", "0.6760333", "0.673882", "0.67322636", "0.6731022", "0.6729424", "0.6721004", "0.6703303" ]
0.8329601
0
Calculate the l2 norm of a stack of patterns.
def l2_norm_batch(pattern_stack): return np.linalg.norm(pattern_stack, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l2_norm(pattern):\n return np.linalg.norm(pattern)", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def _l2_norm_squared(self, z, theta):\n norms = np.zeros(shape=(len(z), self.n_states))\n\n for j in range(self.n_states):\n diff = theta[:, j] - z # ndarray of shape (n_samples, n_states) with differences\n norms[:, j] = np.square(np.linalg.norm(diff, axis=1)) # squared state conditional l2 norms\n\n return norms # squared l2 norm.", "def l2norm(array1,array2):\r\n tot = np.sum(np.abs(array1)**2)\r\n return np.sqrt(np.sum(np.abs(array1-array2)**2)/tot)", "def norm_L2(u):\n return norm_l2(u)/sqrt(float(u.size))", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def l2(vec):\n return np.linalg.norm(vec)", "def norm_l2(u):\n return linalg.norm(u.ravel())", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def compute_L2_normalization(xx):\r\n\treturn np.sum(xx ** 2, axis=1)", "def norm_l2(v):\n return np.sqrt((v**2).sum())", "def L2norm(self, array):\n norm = torch.sqrt(torch.sum(array * array))\n return norm", "def norm_with_l2(original_mat):\n normed_mat = np.zeros(original_mat.shape, dtype=np.float32)\n if len(original_mat.shape) == 2:\n for ind_r in range(original_mat.shape[0]):\n a = np.square(original_mat[ind_r]*1.0)\n b = np.sum(a)\n c = np.sqrt(b)\n normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / c\n # normed_mat[ind_r] = (original_mat[ind_r] * 1.0) / np.sqrt(np.sum(np.square(original_mat[ind_r])*1.0))\n return normed_mat", "def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n\n return output", "def l2_norm(v):\n res = 0\n for e in v:\n res += e * e\n return math.sqrt(res)", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def l2norm_(X, Xstar):\n return cdist(X, Xstar)", "def weight_l2_norm():\n cumulated_l2_norm = tf.constant(0., dtype=tf.float32)\n for trainable_variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n name = trainable_variable.name.split('/')[-1]\n if name.startswith('weights'):\n cumulated_l2_norm += tf.nn.l2_loss(trainable_variable)\n return cumulated_l2_norm", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def norm2(point):\n return np.sum(point**2, -1)", "def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()", "def inner_product_to_normalized_L2_square(matrix):\n\n length = matrix.shape[0]\n norm = np.divide(1, np.sqrt(l2_square_from_inner_product(matrix)))\n\n normalized_inner_product = np.multiply(np.multiply(np.reshape(norm, [length, 1]), matrix),\n np.reshape(norm, [1, length]))\n return 2 - 2 * 
normalized_inner_product", "def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)", "def l2norm(X):\n norm = np.linalg.norm(X, axis=1, keepdims=True)\n return 1.0 * X / norm" ]
[ "0.74098885", "0.6808609", "0.67808706", "0.67259216", "0.65520793", "0.65451896", "0.6460118", "0.64029825", "0.6392266", "0.62797666", "0.627647", "0.626877", "0.62292206", "0.6221982", "0.6214867", "0.62103754", "0.61965305", "0.61522037", "0.6128999", "0.6110056", "0.6074128", "0.6052297", "0.60296553", "0.6021897", "0.60171294", "0.5888251", "0.58822685", "0.58049625", "0.5804344", "0.5796051" ]
0.83394605
0
Calculate the inner product of the two patterns as a vector.
def inner_product(pattern_one, pattern_two): return np.sum(np.multiply(pattern_one, pattern_two))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def __mul__(self, other):\n return Vec2d(self.v[0] * other, self.v[1] * other)", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs", "def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def dotProduct(vectorA, vectorB):\r\n product =0\r\n for i in range(len(vectorA)):\r\n product += eval(vectorA[i])*eval(vectorB[i])\r\n return product", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def _inner_product_a2(\n self, tangent_vec_a, tangent_vec_b, base_point, vertex_areas_bp\n ):\n laplacian_at_base_point = self._space.laplacian(base_point)\n return self.a2 * gs.sum(\n gs.einsum(\n \"...bi,...bi->...b\",\n laplacian_at_base_point(tangent_vec_a),\n laplacian_at_base_point(tangent_vec_b),\n )\n / vertex_areas_bp,\n axis=-1,\n )", "def _inner_product_c1(self, point_a, point_b, normals_bp, areas_bp):\n dna = self._space.normals(point_a) - normals_bp\n dnb = self._space.normals(point_b) - normals_bp\n return self.c1 * gs.sum(\n gs.einsum(\"...bi,...bi->...b\", dna, dnb) * areas_bp, axis=-1\n )", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def dot_product(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]", "def dot_product(v, w):\n return 
sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot4(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]", "def _mulVectors(X1,X2):\n _checkSize(X1,X2)\n return sum([ X1[i] * X2[i] for i in range(len(X1))])", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n inner_prod_mat = self.metric_matrix(base_point)\n aux = gs.einsum(\"...j,...jk->...k\", gs.conj(tangent_vec_a), inner_prod_mat)\n return gs.dot(aux, tangent_vec_b)", "def _qij_vec_inner(a: int, b: int, i: int, j: int):\n vec_dagger = _qij_vec_dagger(a, b)\n vec = _qij_vec(i, j)\n sum_result = FermionOperator()\n for idx, term in enumerate(vec):\n sum_result += term * vec_dagger[idx]\n return sum_result", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def dot_product(first_vector, second_vector):\n first_unpacker = VectorUnpacker(first_vector)\n second_unpacker = VectorUnpacker(second_vector)\n if first_unpacker.unpacked_vector_length != second_unpacker.unpacked_vector_length:\n raise ApplicationError(\"Unpacked vector sizes are unequal\")\n\n # looks better than a 'map' one-liner to me\n value = 0\n for piece in zip(first_unpacker(), second_unpacker()):\n value += piece[0] * piece[1]\n\n return value" ]
[ "0.6906152", "0.6847398", "0.68069786", "0.67931485", "0.6750435", "0.6661231", "0.665229", "0.6584153", "0.6543558", "0.6477087", "0.645507", "0.64223343", "0.64149445", "0.64062977", "0.64052427", "0.6394551", "0.6389129", "0.6387153", "0.6369647", "0.6364393", "0.6364216", "0.63563746", "0.6349533", "0.6338918", "0.6337371", "0.6333778", "0.63327885", "0.6304801", "0.6301042", "0.6294226" ]
0.83498394
0
Calculate the pairwise inner product between each pattern in batch one and each pattern in batch two. Notice that the pattern_stack_one variable represents the patterns along dimension zero while the pattern_stack_two variable represents the patterns along dimension one in the final distance matrix.
def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two): """ Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the other half. """ holder = np.zeros((pattern_num_one, pattern_num_two)) for l in range(pattern_num_one): for m in range(pattern_num_two): holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m])) return holder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def l2_norm_batch(pattern_stack):\n\n return np.linalg.norm(pattern_stack, axis=0)", "def generate_pattern_grid(words1, words2):\n # Convert word lists to integer arrays\n w1, w2 = (\n np.array([[ord(c) for c in w] for w in words], dtype=np.uint8)\n for words in (words1, words2)\n )\n\n if len(w1) == 0 or len(w2) == 0:\n return np.zeros((len(w1), len(w2)), dtype=np.uint8)\n\n # equality_grid[a, b, i, j] represents whether the ith letter\n # of words1[a] equals the jth letter of words2[b]\n equality_grid = np.zeros((len(w1), len(w2), 5, 5), dtype=bool)\n for i, j in it.product(range(5), range(5)):\n equality_grid[:, :, i, j] = np.equal.outer(w1[:, i], w2[:, j])\n\n patterns = np.zeros((len(w1), len(w2)), dtype=np.uint8)\n three_pows = (3**np.arange(5)).astype(np.uint8)\n for i, tp in enumerate(three_pows):\n # This accounts for yellow squares\n patterns[:, :] += tp * equality_grid[:, :, i, :].any(2)\n # This accounts for green squares\n patterns[:, :] += tp * equality_grid[:, :, i, i]\n\n return patterns", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def dim_mul(dims1, dims2):\n return (\n dims1[0] + dims2[0],\n dims1[1] + dims2[1],\n dims1[2] + dims2[2],\n dims1[3] + dims2[3],\n dims1[4] + dims2[4],\n dims1[5] + dims2[5],\n dims1[6] + dims2[6],\n )", "def axis_element_wise_multiplication(t1, t2, which_axis):\n # assert len(K.int_shape(t1)) == len(K.int_shape(t2)) + 1, \"rank(t1) should be rank(t2) + 1\"\n slices = tf.unstack(t1, axis=which_axis)\n # assert K.int_shape(slices[0]) == K.int_shape(t2), \"Slices of t1 were not the same shape as t2\"\n multiplies = []\n for s in slices:\n multiplies.append(t2 * s)\n return tf.stack(multiplies, axis=2)", "def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)", "def double_chop_pairs(\n x1, y1, z1, w1, cell1, x2, y2, z2, w2, indx2, rbins_squared, result):\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n\n n1 = x1.shape[0]\n nbins = rbins_squared.shape[0]\n\n for i in range(start, n1, stride):\n px = x1[i]\n py = y1[i]\n pz = z1[i]\n pw = w1[i]\n\n cell1_i = cell1[i]\n first = indx2[cell1_i]\n last = indx2[cell1_i+1]\n\n for j in range(first, last):\n qx = x2[j]\n qy = y2[j]\n qz = z2[j]\n qw = w2[j]\n\n dx = px-qx\n dy = py-qy\n dz = pz-qz\n wprod = pw*qw\n dsq = dx*dx + dy*dy + dz*dz\n\n k = nbins-1\n while dsq <= rbins_squared[k]:\n cuda.atomic.add(result, k-1, wprod)\n k = k-1\n if k <= 0:\n break", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = 
np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]", "def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs", "def gradient_merge_arrays(cls, image_one, image_two):\n if image_one.shape != image_two.shape:\n raise AttributeError(\"shapes do not match: {} vs {}\".format(image_one.shape, image_two.shape))\n height = image_one.shape[0]\n vector_one = numpy.array([1.0 - float(i + 1) / (height + 1) for i in range(height)])\n vector_two = numpy.array([float(i + 1) / (height + 1) for i in range(height)])\n return (image_one * vector_one[:, numpy.newaxis]) + (image_two * vector_two[:, numpy.newaxis])", "def mul(self,mat1,mat2):\n if(isinstance(mat2,int)==True):\n result = [[mat1[i][j] * mat2 for j in range(len(mat1[0]))] for i in range(len(mat1))]\n self.out = result\n return self.out\n elif(len(mat1[0])==len(mat2)):\n result = [[sum(a*b for a,b in zip(i,j)) for j in zip(*mat2)] for i in mat1]\n self.out = result\n return self.out", "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def _layerwise_dot_product(x_s, y_s):\n return [torch.sum(x * y).item() for x, y in zip(x_s, y_s)]", "def pairwise_distances(a, b, p=2):\n squeezed = False\n if len(a.shape) == 2 and len(b.shape) == 2:\n a = tf.expand_dims(a,0) #[np.newaxis, :, :]\n b = tf.expand_dims(a,0) #b[np.newaxis, :, :]\n squeezed = True\n \n ret = tf.reduce_sum(tf.keras.backend.pow(tf.math.abs(tf.expand_dims(a,2) - tf.expand_dims(b,1)), p), 3)\n #[:, :, np.newaxis, :], [:, np.newaxis, :, :]\n if squeezed:\n ret = tf.squeeze(ret)\n\n return ret", "def pair_eval(self, X, Y):\n d1 = self.d1\n K1 = self.k1.pair_eval(X[:, 
:d1], Y[:, :d1])\n K2 = self.k2.pair_eval(X[:, d1:], Y[:, d1:])\n return K1 * K2", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def product_2(m1, m2):\r\n return make_mono_admissible_2(list(m1) + list(m2))", "def innerprod_q2(q1, q2):\n T = q1.shape[1]\n val = sum(sum(q1 * q2)) / T\n\n return (val)", "def dist_matrix(self, group1, group2):\n \n tmps = []\n for i in group2:\n tmps.append([])\n for j in group1:\n mi, label = self.distance(i, j)\n tmps[-1].append(mi)\n return tmps", "def _interleave_ecdfs(\n x1: np.ndarray,\n y1: np.ndarray,\n x2: np.ndarray,\n y2: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n assert len(x1.shape) == len(x2.shape) == 1\n assert x1.shape == y1.shape\n assert x2.shape == y2.shape\n\n x = np.sort(np.concatenate([x1, x2]))\n y1 = np.insert(y1, 0, [0])\n y2 = np.insert(y2, 0, [0])\n return x, y1[np.searchsorted(x1, x, side='right')], y2[np.searchsorted(x2, x, side='right')]" ]
[ "0.773317", "0.5649336", "0.5526998", "0.54341394", "0.53960305", "0.5378274", "0.5336186", "0.5309336", "0.529776", "0.5289525", "0.5283448", "0.52754015", "0.52056116", "0.5135578", "0.50863296", "0.5050334", "0.5024294", "0.49954486", "0.4992397", "0.49735647", "0.49717188", "0.4961927", "0.4959781", "0.4940064", "0.49399367", "0.4932615", "0.49212724", "0.49055746", "0.48976764", "0.48897642" ]
0.87116134
0
Apply np.exp(-matrix / two_sigma_square) elementwise.
def gaussian_dense(matrix, two_sigma_square): return np.exp(- matrix / two_sigma_square)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exp(tensor):\n return _elementary_op(tensor, np.exp, np.exp)", "def expval(op, dm):\n return np.tensordot(op, dm, ([0, 1], [0, 1]))", "def Exp(A, B):\n return A.dot(expm(B))", "def kernel_sqExp(a,b, ls=1, sv=1):\n a = a.T/ls\n b = b.T/ls\n D, n = np.shape(a)\n d, m = np.shape(b)\n sqdist = np.tile((a**2).T, [1, m]) + np.tile(b*b, [n, 1]) - 2*np.dot(a.T,b)\n my_kernel = (sv**2) * np.exp(-0.5*sqdist)\n \n # written all out to illustrate (need to make sure a, b are in original dimensions):\n# my_kernel2 = np.zeros((n, m))\n# for i in range(n):\n# for j in range(m):\n# ai = a[i]\n# bj = b[j]\n# my_kernel2[i, j] = np.exp(-1/(2*ls**2) * (ai-bj)**2 )\n# my_kernel2 = my_kernel2 * (sv**2)\n \n return my_kernel", "def expms(A, eig=np.linalg.eigh):\r\n # TODO: check that this works reliably for low rank matrices\r\n # first: symmetrize A\r\n D, B = eig(A)\r\n return np.dot(B, (np.exp(D) * B).T)", "def exponentialfcn(x: np.ndarray) -> np.ndarray:\n\n x2 = x**2\n scores = -np.exp(-0.5 * np.sum(x2, axis=1))\n return scores", "def ga_exp(B):\n if np.sum(np.abs(B.value)) < np.finfo(float).eps:\n return cf.MultiVector(layout, unit_scalar_mv.value)\n return cf.MultiVector(layout, val_exp(B.value))", "def gaussian_smearing(E: NDArray[Shape['Ngrid'], Number],\n E0: NDArray[Shape['*, ...'], Number],\n sigma: float):\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -(np.broadcast_to(E, E0.shape + E.shape) - np.expand_dims(E0, len(E0.shape))) ** 2 / (2 * sigma ** 2))", "def exp_env(N, sr, lam = 3):\n return np.exp(-lam*np.arange(N)/sr)", "def get_est_exp_discount_function(self,params):\n params = params[0:5]\n df = pd.DataFrame(self.maturity.apply(lambda x: x ** i) for i in range(1, 6)).T\n df.columns = ['M1', 'M2', 'M3', 'M4', 'M5']\n return np.exp(df.dot(params))", "def sum_exp(self,time):\n sum = np.sum([self.A[i]*np.exp(-1.*self.lambdas[i]*time)\\\n for i in xrange(self.m)])\n return sum", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def gauss_2d(N, sigma = 0.25):\r\n x, y = make_xy(N)\r\n sigma_pixel = sigma * np.float(N)\r\n arrayout = np.exp(-(x**2 + y**2) / sigma_pixel**2) / (np.pi * sigma_pixel**2)\r\n return arrayout", "def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))", "def test_perform_sigm_times_exp(self):\r\n x, y, z, t = tensor.vectors('x', 'y', 'z', 't')\r\n exp = tensor.exp\r\n\r\n def ok(expr1, expr2):\r\n trees = [parse_mul_tree(e) for e in (expr1, expr2)]\r\n perform_sigm_times_exp(trees[0])\r\n trees[0] = simplify_mul(trees[0])\r\n good = theano.gof.graph.is_same_graph(\r\n compute_mul(trees[0]),\r\n compute_mul(trees[1]))\r\n if not good:\r\n print trees[0]\r\n print trees[1]\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[0]))\r\n print '***'\r\n theano.printing.debugprint(compute_mul(trees[1]))\r\n assert good\r\n ok(sigmoid(x) * exp(-x), sigmoid(-x))\r\n ok(-x * sigmoid(x) * (y * (-1 * z) * exp(-x)),\r\n -x * sigmoid(-x) * (y * (-1 * z)))\r\n ok(-sigmoid(-x) *\r\n (exp(y) * (-exp(-z) * 3 * -exp(x)) *\r\n (y * 2 * (-sigmoid(-y) * (z + t) * exp(z)) * sigmoid(z))) *\r\n -sigmoid(x),\r\n sigmoid(x) *\r\n (-sigmoid(y) * (-sigmoid(-z) * 3) * (y * 2 * ((z + t) * exp(z)))) *\r\n -sigmoid(x))\r\n ok(exp(-x) * -exp(-x) * (-sigmoid(x) * -sigmoid(x)),\r\n -sigmoid(-x) * sigmoid(-x))\r\n ok(-exp(x) * -sigmoid(-x) * -exp(-x),\r\n -sigmoid(-x))", "def matlab_style_gauss2D(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def func(self, X, a, b):\n return a*np.exp(-b*X)", "def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n return miu, Sigma", "def _sigma_2(gam, eps):\n s0 = r0**2 * alpha / (3 * eps) / mec2_unit\n\n s1_1 = 16 * (1 - eps + eps**2) * np.log(gam / eps)\n s1_2 = -1 / eps**2 + 3 / eps - 4 - 4 * eps - 8 * eps**2\n s1_3 = -2 * (1 - 2 * eps) * np.log(1 - 2 * eps)\n s1_4 = 1 / (4 * eps**3) - 1 / (2 * eps**2) + 3 / eps - 2 + 4 * eps\n s1 = s1_1 + s1_2 + s1_3 * s1_4\n\n s2_1 = 2 / eps\n s2_2 = (4 - 1 / eps + 1 / (4 * eps**2)) * np.log(2 * gam)\n s2_3 = -2 + 2 / eps - 5 / (8 * eps**2)\n s2 = s2_1 * (s2_2 + s2_3)\n\n return s0 * np.where(eps <= 0.5, s1, s2) * heaviside(gam - eps)", "def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.exp())", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def eval(self, X, Y):\n sigma2 = self.sigma2\n sumx2 = torch.sum(X**2, dim=1).view(-1, 1)\n sumy2 = torch.sum(Y**2, dim=1).view(1, -1)\n D2 = sumx2 - 2.0*torch.matmul(X, Y.transpose(1, 0)) + sumy2\n K = torch.exp(-D2/(2.0*sigma2))\n return K", "def compute_dual_energy(b_i, phi_ij, psi_i, lambda_ij, gamma_ij, N):\n # Compute lambdas for all pair states (0,0),(1,0),(0,1) and (1,1)\n lmbd_tmp = numpy.zeros([N, N, 4])\n lmbd_tmp[:, :, 0] = lambda_ij[:, :, 0] + lambda_ij[:, :, 0].T\n lmbd_tmp[:, :, 1] = lambda_ij[:, :, 0] + lambda_ij[:, :, 1].T\n lmbd_tmp[:, :, 2] = lambda_ij[:, :, 1] + lambda_ij[:, :, 0].T\n lmbd_tmp[:, :, 3] = lambda_ij[:, :, 1] + lambda_ij[:, :, 1].T\n # Compute dual energy\n dual_energy = -numpy.sum(phi_ij * numpy.exp(-1. - lmbd_tmp -\n gamma_ij[:, :, numpy.newaxis]))\n dual_energy -= numpy.sum(\n psi_i * numpy.exp(-1. + (N - 1.) + numpy.sum(lambda_ij, axis=0)) *\n (b_i / psi_i) ** (N - 1))\n dual_energy -= numpy.sum(gamma_ij)\n return dual_energy", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def gauss_2d(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def gauss_2d(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def mse(image1: np.ndarray, image2: np.ndarray) -> np.ndarray:\n return np.sqrt(np.power((image1 - image2), 2).mean(axis=(-1, -2)))", "def similarity_matrix(points, sigma):\n distances_squared = spherical_distances(points, points)**2\n\n \n return np.exp( -distances_squared / (2.0 * sigma) )", "def test_l2_metric_exp_vectorization(\n self, l2_metric_s2, times, landmarks_a, landmarks_b, landmarks_c\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_bc = l2_metric_s2.geodesic(landmarks_b, landmarks_c)\n landmarks_ab = landmarks_ab(times)\n landmarks_bc = landmarks_bc(times)\n\n tangent_vecs = l2_metric_s2.log(point=landmarks_bc, base_point=landmarks_ab)\n\n result = l2_metric_s2.exp(tangent_vec=tangent_vecs, base_point=landmarks_ab)\n self.assertAllClose(gs.shape(result), gs.shape(landmarks_ab))" ]
[ "0.60777706", "0.6070397", "0.6037772", "0.60048586", "0.59793174", "0.58501387", "0.58492404", "0.58091897", "0.57963014", "0.57812047", "0.57540333", "0.57262975", "0.57174045", "0.5708458", "0.5661374", "0.5589162", "0.55674833", "0.5548229", "0.5532804", "0.5519702", "0.54916966", "0.54835385", "0.5474528", "0.54680383", "0.5456158", "0.5456158", "0.5452313", "0.5418749", "0.5401341", "0.5395118" ]
0.7608249
0
Each row of the matrix, let's say the jth row, represents the distances between the other data points and the jth point. This function returns the indexes of the points with the smallest distances with respect to each point represented by that row. By row, I mean the 0th dimension. Also notice that this function includes the target particle, i.e. the diagonal element along the matrix is set to 1.
def nearest_points_indexes_with_self(matrix, num_to_keep): # Set the diagonal to 1 np.fill_diagonal(matrix, 1) # Get the position for the resulted values sort_arg = np.argsort(matrix, axis=1) return sort_arg[:, : num_to_keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def nearest_points_indexes_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort_arg = np.argsort(matrix, axis=1)\n\n return sort_arg[:, : num_to_keep]", "def find_min_distance():\n return np.argmin(d)", "def nearest_neighbour(matrix, start=0):\n path = [start]\n while len(matrix) != len(path):\n matrix[:, start] = numpy.inf\n start = numpy.argmin(matrix[start])\n path.append(start)\n return path", "def nearest_points_values_with_self(matrix, num_to_keep):\n\n # Set the diagonal to 1\n np.fill_diagonal(matrix, 1)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j", "def nearest_cluster(X,c):\n K = np.size(c,0)\n idx = np.zeros((np.size(X,0),1))\n arr = np.empty((np.size(X,0),1))\n for i in range(0,K):\n y = c[i]\n temp = np.ones((np.size(X,0),1))*y\n b = np.power(np.subtract(X,temp),2)\n a = np.sum(b,axis = 1)\n a.resize((np.size(X,0),1))\n arr = np.append(arr, a, axis=1)\n arr = np.delete(arr,0,axis=1)\n idx = np.argmin(arr, axis=1)\n return idx", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def find_closest_index(traj, point):\n\n\t#TODO: vectorise function to receive any length of points.\n\n\tdistances = np.subtract(np.array(point),traj) \n\tdistances = distances.reshape(-1,2)\n\t#distances = distances[~np.isnan(distances)].reshape(-1,2)\n\n\t#print(\"distances\")\n\t#pprint(distances)\n\tdist_array = np.linalg.norm(distances, axis = 1)\n\t#pprint(dist_array)\n\t#dist_array = np.sqrt((distances[:,0]**2)+(distances[:,1]**2)) #array of distances from trajectory to gaze landing point in world. \n\tidx = np.nanargmin(abs(dist_array)) #find smallest difference in pythag distance from 0,0 to get closest point. 
\n\tdists = distances[idx, :]\n\tdist = dist_array[idx]\n\n\treturn idx#, dists, dist\n\t#return idx", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def nearest_points_values_without_self(matrix, num_to_keep):\n\n # Set the diagonal to 0\n np.fill_diagonal(matrix, 0)\n # Get the position for the resulted values\n sort = np.sort(matrix, axis=1)\n\n return sort[:, : num_to_keep]", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def nearestNeighbor(self, coords, my_index, blacklist):\n min_index, min_distance = 0, 999999999\n my_col, my_row = coords[my_index]\n for i, (col, row) in enumerate(coords):\n if i in blacklist:\n continue\n distance = math.sqrt((my_col-col)**2 + (my_row-row)**2)\n if distance < min_distance:\n min_index, min_distance = i, distance\n return min_index", "def _nearest_cluster_distance(distances_row, labels, i):\n label = labels[i]\n b = np.min([np.mean(distances_row[labels == cur_label])\n for cur_label in set(labels) if not cur_label == label])\n return b", "def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours 
aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n \n for 
index_p in range(len(col_count)):\n for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output", "def find_min_point(points):\r\n smallest_point_index = 0\r\n for i in range(1, len(points)):\r\n if points[i][1] < points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n elif points[i][0] > points[smallest_point_index][0] and points[i][1] == points[smallest_point_index][1]:\r\n smallest_point_index = i\r\n return smallest_point_index", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)" ]
[ "0.76846826", "0.7152678", "0.65159774", "0.6458269", "0.64570606", "0.6453487", "0.64491487", "0.6418172", "0.6344188", "0.6331945", "0.62991154", "0.62834746", "0.62771547", "0.62474954", "0.6215967", "0.6195537", "0.6191709", "0.6147389", "0.6129545", "0.6113139", "0.60623944", "0.606233", "0.6058435", "0.60123354", "0.5976111", "0.5973001", "0.5966138", "0.5958277", "0.59407294", "0.5921387" ]
0.7358082
1
Generate an identity key pair. Clients should only do this once, at install time. Returns the generated IdentityKeyPair.
def generateIdentityKeyPair(): keyPair = Curve.generateKeyPair() publicKey = IdentityKey(keyPair.getPublicKey()) serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \ 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \ 'edfbcd82129b14a88791ac81365c' serialized = binascii.unhexlify(serialized.encode()) identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey()) return identityKeyPair # return IdentityKeyPair(serialized=serialized)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_key_pair(self, keysize, cb):\n\n def gen_key_pair_pub_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n cb(ctx, data)\n\n def gen_key_pair_priv_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n xrun([self.sslname, 'rsa', '-pubout'], gen_key_pair_pub_cb,\n data, data)\n\n return xrun([self.sslname, 'genrsa', str(keysize)],\n gen_key_pair_priv_cb, None)", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate_rsa_key_pair(self):\n\t\tprint \"Started rsa key generation\"\n\t\tkey = RSA.generate(self.key_size, randfunc=self.random_number_generator)\n\t\t\t\n\t\tpub_key = key.publickey().exportKey()\n\t\tprint pub_key\n\t\t\n\n\t\tpriv_key = key.exportKey()\n\t\tprint \"Private key\", priv_key \n\t\tprint \"Note: Normally, the private key should be protected. For the purposes of this demo, I'm printing it to terminal.\"", "def generate_key(self, **options):\n\n return security_utils_services.generate_rsa_key(**options)", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)", "def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = {}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)", "def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n 
)\n return key", "def gen_key(self):\n\n if not self.private_key:\n self._gen_key()\n else:\n raise CryptoError(\"Private Key already existing\")", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def test_generate_key_pair(self):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_key = Mock()\n mock_key.fingerprint = 'fake-fingerprint'\n mock_gpg.gen_key.return_value = mock_key\n\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n fake_key = encryptor.generate_key_pair(key_type=\"RSA\", length=4096, options={\n 'name_real': 'Fake Name', 'name_email': '[email protected]', 'name_comment': 'Fake comment'})\n\n self.assertEqual(mock_gpg.gen_key_input.call_count, 1)\n self.assertEqual(fake_key, mock_key.fingerprint)", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def _gen_key(self):\n\n input_data = self._gpg.gen_key_input(key_type=\"RSA\",\n key_length=self.key_length, name_real=self.name,\n name_comment=self.comment, name_email=self.email)\n\n log.info(\"Generating key: (%s)\" % input_data)\n\n self.key = self._gpg.gen_key(input_data)", "def generate(self, force=False):\n if not self.check_force_generate(force):\n return False\n\n mkdirs(self.path)\n\n command = [openssl, 'ecparam', '-genkey', '-name', self.asn1_oid, '-out', self.key_file]\n\n self.log.info('Generating EC key')\n # Generate the keyfile with no password\n if not run_command(command):\n raise RuntimeError('EC key generation failed', self)\n\n # Now encrypt the key with a password, overwriting the original\n # passwordless key.\n if self.password:\n command = [\n openssl, 'ec',\n '-in', self.key_file,\n '-out', self.key_file,\n '-des3', '-passout', 'pass:{}'.format(self.password)\n ]\n self.log.info('Encrypting key with password')\n\n if not run_command(command):\n raise RuntimeError('EC key file password encryption failed')\n\n if not self.exists():\n raise RuntimeError(\n 'Key generation succeeded but key file does not exist. 
'\n 'This should not happen', self\n )", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def create_key ():", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:\n\n return None" ]
[ "0.71120954", "0.68204904", "0.65850353", "0.6553852", "0.65218014", "0.6443924", "0.6378423", "0.63755655", "0.6358998", "0.6324435", "0.63147116", "0.6309187", "0.62952316", "0.6251726", "0.61868006", "0.61828184", "0.61474425", "0.6133282", "0.61287004", "0.60888094", "0.6040706", "0.60231555", "0.60195357", "0.6008617", "0.5979798", "0.597343", "0.5961485", "0.5926425", "0.5888555", "0.5860322" ]
0.8180875
0
Generate a registration ID. Clients should only do this once, at install time.
def generateRegistrationId(): regId = KeyHelper.getRandomSequence() return regId
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_id():\n return uuid4().get_hex()", "def generateID(self):\n\n return str(uuid.uuid1())", "def get_id(self) -> str:\n return self._register_id", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def id_generator():\r\n new_id = uuid.uuid4()\r\n return new_id.hex", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str( uuid.uuid4() )", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def _generate_uuid(self):\n\n return uuid.uuid4()", "def generate_uuid():\n return uuid.uuid4()", "def gen_uuid():\n return str(uuid.uuid4())", "def unique_id() -> str:", "def req_id_generator() -> str:\n # 8 chars long should be long enough, add the 'Generated' prefix to know not to search for this id in the elb logs\n return f'Generated-{str(uuid.uuid4())[:8]}'", "def generateId( self ):\n # try to use the uuid module\n try:\n import uuid\n return uuid.uuid1()\n \n # otherwise, use the random module\n except ImportError:\n import random\n return random.randint(-1000000000000, 1000000000000)", "def userIDGen() :\n\treturn __randomString(8)", "def gen_uuid() -> str:\n return str(uuid4())", "def new_uid():\n return str(uuid.uuid1())[:30]", "def generate_user_id() -> str:\n return 'u' + str((uuid.getnode()))", "def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def _generate_uuid():\n return str(uuid.uuid4())", "def generate_uuid():\n return uuid.uuid4()", "def generate_uuid():\n return uuid.uuid4().hex", "def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())", "def generate_uuid():\n return f'{uuid.uuid1()}'", "def gen_id(self) -> str:\n self._id += 1\n return str(self._id)", "def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))", "def generate_key(self):\n return str(uuid4())", "def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))" ]
[ "0.7785231", "0.7583207", "0.74686027", "0.74049723", "0.7362628", "0.72884035", "0.72758955", "0.72758955", "0.7264104", "0.7262016", "0.72611034", "0.72470003", "0.7220589", "0.721949", "0.7148431", "0.71448946", "0.7102137", "0.7092246", "0.70777285", "0.7075378", "0.7058048", "0.70572597", "0.70404285", "0.7021929", "0.70185554", "0.7004354", "0.6991372", "0.6982666", "0.6977142", "0.6976874" ]
0.8603011
0
Generate a list of PreKeys. Clients should do this at install time, and subsequently any time the list of PreKeys stored on the server runs low. PreKey IDs are shorts, so they will eventually be repeated. Clients should store PreKeys in a circular buffer, so that they are repeated as infrequently as possible. start: the starting PreKey ID, inclusive. count: the number of PreKeys to generate. Returns the list of generated PreKeyRecords.
def generatePreKeys(start, count): results = [] start -= 1 for i in range(0, count): preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1 results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair())) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def gen_keys():", "def create(self, record_count, start_id, lock=None):\n\n message_reference_beginning = self.create_random_string(10)\n\n records = []\n\n for i in range(start_id, record_count + start_id):\n record = self.__create_record(i, message_reference_beginning)\n records.append(record)\n\n return records", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def get_generator(strains, reference_id, start, end):\n primary_keys = []\n vals = range(start, end+1)\n for val in vals:\n for strain in strains:\n primary_keys.append(strain+\"_\"+reference_id+\"_\"+str(val))\n return primary_keys", "def import_to_reids(record_count=8):\n\ttry:\n\t\tconn = redis.Redis(host=HOST,port=PORT,password=PASSWD)\n\texcept:\n\t\tprint 'connection error'\n\t\tsys.exit(0)\n\n\t# add to a set,transaction with pipeline\n\ttrans = conn.pipeline(transaction=True) \n\tset_name = 'activation_code'\n\ttry:\n\t\tfor i in xrange(record_count):\n\t\t\tcode = activation_code_generaor()\n\t\t\ttrans.sadd(set_name,code)\n\t\ttrans.execute() #commit all commands at a time\n\t\t# show the code\n\t\tprint'success,number of keys in a set:',conn.scard(set_name)\n\texcept:\n\t\tprint 'error,rollback'\n\t\tsys.exit(0)", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def keys_fetch(self):\n with self.env.begin(write=False) as txn:\n cursor = txn.cursor()\n tot = txn.stat()['entries']\n i = 0\n\n path = self.db_path\n base_name = self.base_path\n cache_file_path = os.path.join(path, '_cache_' + base_name + '.pkl')\n print('cache_file_path = ', cache_file_path) # DEBUG\n\n if os.path.isfile(cache_file_path):\n self.keys = pickle.load(open(cache_file_path, 'rb'))\n self._num_examples = tot\n else:\n keys = []\n for key, _ in cursor:\n i += 1\n if i % 1000 == 0 or i == tot:\n print('Fetching {:>8d} /{:>8d} keys'.format(i, tot),\n end='\\r')\n keys.append(key)\n print('\\nDone.')\n self._num_examples = tot\n self.keys = np.asarray(keys)\n pickle.dump(self.keys, open(cache_file_path, 'wb'))", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 4", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 8", "def generate_begin(self):\n\t\tlist = []\n\t\tlist += 
self.ks['begin']\n\t\treturn list", "def get_key_set():\n keys = [0] * n_families\n for i in range(n_families):\n keys[i] = get_key(i)\n\n return keys", "def random_key_gen(self, number):\n if (number > len(self.vocables)):\n raise IndexError('Liste enthaelt nicht genuegend Elemente')\n else:\n k_list = list(self.vocables.keys())\n random.shuffle(k_list)\n for k in k_list[:number]:\n yield k", "def made_key(self):\n \n # select a random number from 1 to infinity \n ran_number = random.randint(1,99)\n\n # create a random set based on the first number you chose \n set = xrange(ran_number,28*ran_number,ran_number)\n\n # increase the value of every number in the set \n for item in set:\n item += 3\n Code_Fouad_Teniou.my_key.append(item)\n\n #return a random key \n return Code_Fouad_Teniou.my_key", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def _get_primary_keys(self, table_name, num_rows):\n primary_key = self.metadata.get_primary_key(table_name)\n primary_key_values = None\n\n if primary_key:\n field = self.metadata.get_fields(table_name)[primary_key]\n\n generator = self.primary_key.get(table_name)\n\n if generator is None:\n if field['type'] != 'id':\n raise ValueError('Only columns with type `id` can be primary keys')\n\n subtype = field.get('subtype', 'integer')\n if subtype == 'integer':\n generator = itertools.count()\n remaining = np.inf\n elif subtype == 'string':\n regex = field.get('regex', r'^[a-zA-Z]+$')\n generator = exrex.generate(regex)\n remaining = exrex.count(regex)\n elif subtype == 'datetime':\n raise NotImplementedError('Datetime ids are not yet supported')\n else:\n raise ValueError('Only `integer` or `string` id columns are supported.')\n\n self.primary_key[table_name] = generator\n self.remaining_primary_key[table_name] = remaining\n\n else:\n remaining = self.remaining_primary_key[table_name]\n\n if remaining < num_rows:\n raise ValueError(\n 'Not enough unique values for primary key of table {}'\n ' to generate {} samples.'.format(table_name, num_rows)\n )\n\n self.remaining_primary_key[table_name] -= num_rows\n primary_key_values = pd.Series([x for i, x in zip(range(num_rows), generator)])\n\n return primary_key, primary_key_values", "def generate_keys(cls, des_key: str) -> List[list]:\n\n keys = []\n des_key = cls.string_to_bit_array(des_key)\n # Apply the initial Permutation on the key\n des_key = cls.permutation_expand(des_key, Tables.PC_1_TABLE)\n # Split it in to LEFT,RIGHT\n left, right = cls.n_split(des_key, 28)\n # Apply the 16 rounds\n for i in range(16):\n # Apply the shift associated with the round (not always 1)\n left, right = cls.shift(left, right, Tables.SHIFT_ARRAY[i])\n # Merge them\n tmp = left + right\n # Apply the Permutation to get the Ki\n keys.append(cls.permutation_expand(tmp, Tables.PC_2_TABLE))\n return keys", "def get_next_keys(self):\n P_List = []\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n P_List.append(int(construct_pass(key, choice)))\n return P_List", "def create_keypairs(self,\n names=None,\n count=1,\n public_key=None,\n check=True):\n names = names or utils.generate_ids(count=count)\n\n keypairs = []\n for name in names:\n keypair = self._client.create(name, public_key=public_key)\n keypairs.append(keypair)\n\n if check:\n self.check_keypairs_presence(keypairs)\n\n for keypair in keypairs:\n if public_key is not None:\n assert_that(keypair.public_key, equal_to(public_key))\n\n return keypairs", "def GetInputFileKeys(version):\n \n if version == 7:\n 
inputfile_keys = ['DynBrkFi','PtfmFile',\n 'TwrFile','FurlFile','BldFile(1)',\n 'BldFile(2)','BldFile(3)','NoiseFile','ADAMSFile',\n 'LinFile']\n \n elif version == 8:\n errStr = 'Keys for FAST 8 have not been coded yet.'\n ValueError(errStr)\n \n else:\n errStr = 'Uncoded version \\\"{:d}\\\".'.format(version)\n ValueError(errStr)\n \n return inputfile_keys", "def _get_keys(self, listOfKeys):\n return self._keys", "def generateDictKeys(string, n,step=1):\n if type(string) != str or type(n) != int:\n raise ValueError('Please input string and integer for first and second argument')\n elif step == 1:\n keylist = [string+str(i) for i in range(n)]\n return keylist\n else:\n keylist = [string+str(i) for i in range(0, n*step, step)]\n return keylist", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def __gen_keys__(self):\n if self.seed == b'':\n self.seed = urandom(self.seed_size)\n\n n_prev = Node(hash=hash_factory(data=bytes(self.seed)).digest())\n self.keys.insert(0, n_prev)\n\n for i in range(1, self.l + 1):\n n = Node(hash=hash_factory(data=bytes(n_prev.hash)).digest())\n self.keys.insert(0, n)\n n_prev = n\n\n # Add the decoy nodes as parents of pair nodes.\n # The pair nodes will _always_ be the right child of the decoy nodes.\n for i in range(2, self.l + 1, 2):\n n_pair = self.keys[i] # type: Node\n n_impair_prev = self.keys[i-1] # type: Node\n n_pair.parent = Node(hash=bytes(n_impair_prev.hash))\n n_pair.parent.right_child = n_pair", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def makePartition(self, count):\n\t\tvalidPart = (nr.random_sample(count) < self.proportion).astype(\"int32\")\n\t\ttrainPart = n.ones(count, dtype=\"int32\") - validPart\n\n\t\treturn self.toIndexes(trainPart), self.toIndexes(validPart)", "def iterkeys(self):\n\n for i in xrange(0, self._limit):\n try:\n self[i]\n yield i\n except KeyError:\n pass", "def prefix_keys(self, prefix, maxkeys=None):\n # TODO: write better documentation: describe purpose, provide example code\n if maxkeys is None:\n maxkeys = len(self)\n\n return wait(self.proto.fwmkeys(prefix, maxkeys))", "def generate_keys(self):\n\n\t\tcondition = False\n\t\t\n\t\t\t\n\t\twhile (not condition) :\n\t\t\t# step 1 : chose random primary numbers p and q\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._p = n\n\t\t\tn = 
generate_prime(self.min_bound,self.max_bound)\n\t\t\twhile(n == self._p):\n\t\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._q = n\n\n\t\t\t#step 2 : compute n = pq\n\t\t\tself.n = self._p * self._q\n\t\t\t\n\t\t\ta = find_invpow(self.n,4) // 3\n\t\t\tcondition = (self._p > self._q) and (self._p < 2 * self._q)\n\t\t\tif (not condition) :\n\t\t\t\tcontinue\n\n\t\t\tprint(\"step one OK\")\n\n\t\t\t#step 3 : compute phi(n)\n\t\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t\t#step 4 : chose the exponent\n\t\t\tn = randint(100,a)\n\t\t\twhile (gcd(self._phi,n) != 1):\n\t\t\t\tn = randint(100,self._phi)\n\t\t\tself._d = n\n\n\t\t\t#step 5 : compute d (private key)\n\t\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\t\tcondition = (self._d < a)\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)" ]
[ "0.6100436", "0.57589746", "0.5668012", "0.56462705", "0.5632386", "0.55703026", "0.5561076", "0.5519232", "0.5507431", "0.5470912", "0.546842", "0.54056185", "0.52409744", "0.5236303", "0.5193606", "0.5192819", "0.51398957", "0.50818324", "0.5079338", "0.5055663", "0.5039773", "0.50224555", "0.49965414", "0.4986866", "0.4963776", "0.49413583", "0.49408898", "0.49352106", "0.4926059", "0.49246648" ]
0.87624663
0
Check whether the given reader exists
def exists(reader_name: str) -> bool: return plugins.exists(package_name=__name__, plugin_name=reader_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _haveReadLocks(self): \n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if name.startswith(readLockFileName):\n return True\n return False", "def exists(identifier, network):\n foo = next(load(identifier, network), None)\n return foo is not None", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False", "def test_read_before_connected(connection, reader, loop):\n value = loop.run_until_complete(connection.read())\n assert not value\n assert not reader.used", "def check_access(ident):\n resource = data_service.resource_load(uniq = ident)\n log.debug('Result from the database: %s'%resource)\n if resource is None:\n return False\n return True", "def does_resource_exist(resource):\n try:\n resource.load()\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'ValidationError':\n return False\n else:\n raise e", "def _object_exists(name):\n conn = sqlite3.connect('/dev/input')\n try:\n cur = conn.cursor()\n sql = 'SELECT ROWID FROM object WHERE name=? 
AND deleted=0'\n cur.execute(sql, (name, ))\n result = cur.fetchall()\n return len(result) > 0\n finally:\n conn.close()", "def object_exists(self, fname):\n return False", "def object_exists(self, fname):\n return self.object_exists", "def is_file_exists(self):\n pass", "def object_exists(self, fname):\n return True", "def check_handle(handle):\n return os.path.isfile(get_path_filename(handle))", "def check_if_row_already_loaded(self, row, file_name):\n\t\tquery = \"SELECT count(*) FROM \" + TABLE_NAME + \" WHERE GLOBALEVENTID = \" + \"'\" + row[0] + \"'\"\n\n\t\ttry:\t\t\t\n\t\t\t# print query\n\t\t\tcursor = self.connection.cursor()\n\t\t\texecuted_cur = cursor.execute(query)\n\n\t\t\tif executed_cur:\t\t\t\n\t\t\t\tresult_cur = cursor.fetchall()\n\t\t\t\tfor row in result_cur:\n\t\t\t\t\tif int(row[0]) > 0:\n\t\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint \"[e] Something wrong with execution.\"\n\t\texcept Exception, e:\n\t\t\tprint '[e] Exeption: %s while processing \"%s\" file in method %s' % \\\n (str(e), DATA_DIRECTORY + '/' + file_name, \"check_if_row_already_loaded\")\n\t\t\tprint '\\t[q] Query that caused exception \\n %s' % (query)\n\n\n\t\treturn False", "def _check_row_exists(self, pk):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n if exists:\n return True\n return False", "def ResourceExists(resource_name, search_user_paths=True):\n try:\n ResourcePath(resource_name, search_user_paths)\n return True\n except ResourceNotFound:\n return False", "def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False", "def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass", "def test_exists(self):\n d = self._examineOrSelect()\n self._response(b'* 3 EXISTS')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'EXISTS': 3})", "def exist(self):", "def read(reader: BitStreamReader, _index: int) -> bool:\n\n return reader.readBool()", "def _file_exists(name):\n try:\n f = open(name)\n f.close()\n return True\n except IOError:\n return False", "def resource_exists(uri: Optional[str]) -> bool:\n\n if uri is None:\n return True\n\n # TODO Replace after finding way to pass custom fs through FireO validator\n if uri.startswith(\"gs://\"):\n return True\n\n else:\n # Get file system\n fs, uri = url_to_fs(uri)\n\n # Check exists\n if fs.exists(uri):\n return True\n\n return False", "def exists(self):\n return True", "def exists(self):\n return True", "def check(self):\n # validate contents still to do - for now just check if it exists\n return os.path.exists(self.getDefaultDatabaseConnectionParameter()['path'])", "async def exists(self, tag_name):\n try:\n if await self.get_id(tag_name):\n return True\n except RtbDoesntExists:\n return False", "def exists (self, uuid):\n return self.read (uuid) is not None", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)" ]
[ "0.6164184", "0.5904849", "0.58638656", "0.58638656", "0.5770025", "0.575396", "0.56898904", "0.5686118", "0.56144536", "0.56005126", "0.5580309", "0.5566034", "0.5542885", "0.55397743", "0.5537704", "0.55220896", "0.5509569", "0.54929805", "0.549026", "0.54900736", "0.54776174", "0.54772145", "0.5458027", "0.5456437", "0.5438363", "0.5438363", "0.5433666", "0.54315096", "0.54271394", "0.54217505" ]
0.66304976
0
Get one-line documentation for readers. If no readers are specified, documentation for all available readers is returned.
def short_docs(*readers: str) -> List[Tuple[str, str]]: if not readers: readers = names() return [(r, plugins.doc(__name__, r, long_doc=False)) for r in readers]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_readers():\n return all_readers", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def read_documentation(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n while lin[0] != ':':\r\n self.documentation.append(lin)\r\n lin = self.read_line(fid)\r\n return lin", "def get_docs_and_page():\n _, *args = sys.argv[:]\n if len(args) > 0:\n print(pydoc.getdoc(*args))\n return pydoc.getdoc(*args)", "def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)", "def get_reader_funcs():\n return READERS", "def get_documentation(self, *args, **dargs):\n pass", "def chain(*readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n\n for e in itertools.chain(*rs):\n yield e\n\n return reader", "def doc(self):\n return \"\\n\".join(self.docLines)", "def docs():", "def docLines(self):\n summary, description = self._getDocParts()\n if description:\n return summary + [\"\"] + description\n return summary", "def summarize_rcdocs(modnames, headersep=\"=\", maxdflt=2000):\n nods = \"No docstring provided.\"\n template = \":{0!s}: {1!s}, *default:* {2}.\"\n docstrs = []\n tw = textwrap.TextWrapper(width=80, subsequent_indent=\" \"*4)\n for modname in modnames:\n moddoc = str(modname)\n moddoc += \"\\n\"+ headersep * len(moddoc) + \"\\n\"\n plugins = Plugins([modname], loaddeps=False) # get a lone plugin\n plugins.merge_rcs()\n rc = plugins.rc\n rcdocs = plugins.rcdocs\n for key in sorted(rc._dict.keys()):\n dflt = getattr(rc, key)\n rdflt = repr(dflt)\n rdflt = rdflt if len(rdflt) <= maxdflt else \"{0}.{1} instance\".format(\n dflt.__class__.__module__, dflt.__class__.__name__)\n rcdoc = template.format(key, rcdocs.get(key, nods), rdflt)\n moddoc += \"\\n\".join(tw.wrap(rcdoc)) + '\\n'\n docstrs.append(moddoc)\n return \"\\n\\n\\n\".join(docstrs)", "def get_documentation(path=\"\"):\n return \"\"\"<HTML><head><title>Python Minidoc for \"\"\"+path+\"\"\"</title></head>\n <body>\n \"\"\"+get_documentation_body(path)+\"\"\"\n </body></html>\"\"\"", "def get_documented(filenames):\r\n documented = {}\r\n for filename in filenames:\r\n f = open(filename, 'r')\r\n lines = f.read().splitlines()\r\n documented.update(get_documented_in_lines(lines, filename=filename))\r\n f.close()\r\n return documented", "def get_documented(filenames):\n documented = {}\n for filename in filenames:\n f = open(filename, 'r')\n lines = f.read().splitlines()\n documented.update(get_documented_in_lines(lines, filename=filename))\n f.close()\n return documented", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def getDoc(self):\r\n return self.__doc__", "def parse_docs(docs):\n if not docs:\n return __name__, \"<no documentation>\"\n docs = docs.strip().split('\\n')\n for i, line in enumerate(docs):\n docs[i] = line.strip()\n return docs[0], ' '.join(docs[1:]) if len(docs[1:]) else \"<no documentation>\"", "def get_doc(cls_or_func):\n try:\n return cls_or_func.__doc__.split(\"\\n\")[0].strip()\n except (AttributeError, IndexError):\n return None", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def get_documented_in_docstring(name, module=None, filename=None):\r\n try:\r\n obj, real_name = import_by_name(name)\r\n lines = pydoc.getdoc(obj).splitlines()\r\n return get_documented_in_lines(lines, module=name, filename=filename)\r\n except AttributeError:\r\n pass\r\n except ImportError, 
e:\r\n print \"Failed to import '%s': %s\" % (name, e)\r\n return {}", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def documentation_only():\n pass", "def get(self, *args):\n return self.docs.get(*args)", "def get_documented_in_docstring(name, module=None, filename=None):\n try:\n obj, real_name = import_by_name(name)\n lines = pydoc.getdoc(obj).splitlines()\n return get_documented_in_lines(lines, module=name, filename=filename)\n except AttributeError:\n pass\n except ImportError, e:\n print \"Failed to import '%s': %s\" % (name, e)\n return {}", "def documentation(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"documentation\")", "def documentation():\n return auto.html()", "def hotkeys_readme():\n\n root = '/'.join(__file__.split('/')[:-4])\n fname = root + '/README.rst'\n with codecs.open(fname, 'r', 'utf-8') as f:\n rst = f.read()\n hotkeys = rst.split('.. hotkeys')[1]\n return docutils.examples.html_body(hotkeys)" ]
[ "0.6139211", "0.5529351", "0.5529351", "0.5529351", "0.5385095", "0.53439957", "0.52958757", "0.5215697", "0.5117727", "0.49581614", "0.48913068", "0.4863882", "0.48443574", "0.48011702", "0.47767526", "0.47586027", "0.47446275", "0.4685375", "0.46740708", "0.4643249", "0.46230683", "0.46218795", "0.46174067", "0.4599343", "0.45940423", "0.4585905", "0.4578101", "0.45744273", "0.45646197", "0.45554435" ]
0.67184293
0
Read a bytes stream with a given reader If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found.
def read_stream( input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any ) -> Reader: if reader_name is None: reader_name = identify(input_stream) reader = plugins.call( package_name=__name__, plugin_name=reader_name, input_stream=input_stream, **reader_args, ) reader.read() return reader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def read_file(\n file_path: Union[str, pathlib.Path],\n reader_name: Optional[str] = None,\n **reader_args: Any,\n) -> Reader:\n with open(file_path, mode=\"rb\") as input_stream:\n return read_stream(input_stream, reader_name)", "def read(filePath, reader='infer'):\n if isinstance(reader, str):\n if reader == 'infer':\n loader = inferReader(filePath)\n else:\n if reader in READERS:\n loader = READERS[reader]\n else:\n raise SerpentToolsException(\n 'Reader type {} not supported'.format(reader)\n )\n else:\n assert callable(reader), (\n 'Reader {} is not callable'.format(str(reader)))\n loader = reader\n returnedFromLoader = loader(filePath)\n returnedFromLoader.read()\n return returnedFromLoader", "def read_bytes(stream, writer_schema=None, reader_schema=None): # noqa\n size = read_long(stream)\n if reader_schema == 'string':\n # Schema Resolution: promote to unicode string\n return stream.read(size).decode('utf-8')\n else:\n return stream.read(size)", "async def read_or_exc(reader, n, timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.read(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def reader_for_streaming(io):\n if not hasattr(io, 'read'):\n raise TypeError('{0} must be an opened file.'.format(io))\n if hasattr(io, 'encoding'):\n raise TypeError('{0} must be opened in binary mode'.format(io))\n return reader.Reader.read_headers(io)", "def read(self, source, sourcename=None, postcheck=True, strict=True):\n if isinstance(source, str):\n with open(source, mode=\"r\") as stream:\n return self.readTextStream(\n stream,\n sourcename or source,\n postcheck=postcheck,\n strict=strict,\n )\n elif isinstance(source, pathlib.Path):\n with source.open(mode=\"r\") as stream:\n return self.readTextStream(\n stream,\n sourcename or str(source),\n postcheck=postcheck,\n strict=strict,\n )\n elif isinstance(source, io.BufferedIOBase):\n return self.readTextStream(\n io.TextIOWrapper(source),\n sourcename,\n postcheck=postcheck,\n strict=strict,\n )\n elif not isinstance(source, io.TextIOBase):\n raise TypeError(\n \"Source must be file name (str or pathlib.Path) or \"\n \"readable stream of text data. 
Got {}\".format(type(source))\n )\n return self.readTextStream(\n source, sourcename, postcheck=postcheck, strict=strict\n )", "def schemaless_reader(stream, schema):\n acquaint_schema(schema)\n return read_data(stream, schema)", "async def readuntil_or_exc(reader, separator = b'\\n', timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.readuntil(separator), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def get_stream_reader(fh, tmp_dir):\n magic_dict = {\n b\"\\x1f\\x8b\\x08\": _get_stream_readers_for_gzip,\n b\"\\x42\\x5a\\x68\": _get_stream_readers_for_bz2,\n b\"\\x50\\x4b\\x03\\x04\": _get_stream_readers_for_zip,\n }\n start_of_file = fh.read(CHUNK_SIZE)\n try:\n fh.seek(0)\n except UnsupportedOperation: # This happens if fh has been created by urlopen\n fh = _download_file(start_of_file, fh)\n try: # Check if file is tar file\n if tarfile.open(fileobj=StringIO(start_of_file)):\n return _get_stream_readers_for_tar(fh, tmp_dir)\n except tarfile.ReadError:\n pass\n for k, v in magic_dict.items():\n if start_of_file.startswith(k):\n return v(fh, tmp_dir)\n return [fh]", "def _ReadStream(self, stream_name):\n file_object = self._OpenStream(stream_name)\n if not file_object:\n return b''\n\n try:\n data = file_object.read()\n finally:\n file_object.close()\n\n return data", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def read(ios):\n assert(isinstance(ios, io.IOBase))\n return Reader(ios).read()", "def do_read(fp, decoder):\r\n # read header\r\n header = fp.read(RecordIO.RECORD_HEADER_SIZE)\r\n if len(header) == 0:\r\n log.debug(\"%s has no data (current offset = %d)\" % (fp.name, fp.tell()))\r\n # Reset EOF (appears to be only necessary on OS X)\r\n fp.seek(fp.tell())\r\n return None\r\n elif len(header) != RecordIO.RECORD_HEADER_SIZE:\r\n raise RecordIO.PrematureEndOfStream(\r\n \"Expected %d bytes in header, got %d\" % (RecordIO.RECORD_HEADER_SIZE, len(header)))\r\n blob_len = struct.unpack('>L', header)[0]\r\n if blob_len > RecordIO.MAXIMUM_RECORD_SIZE:\r\n raise RecordIO.RecordSizeExceeded(\"Record exceeds maximum allowable size\")\r\n\r\n # read frame\r\n read_blob = fp.read(blob_len)\r\n if len(read_blob) != blob_len:\r\n raise RecordIO.PrematureEndOfStream(\r\n 'Expected %d bytes in frame, got %d' % (blob_len, len(read_blob)))\r\n return decoder.decode(read_blob)", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = 
os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "async def read_stream(self, stream_read_request_body: StreamReadRequestBody = Body(None, description=\"\")) -> StreamRead:\n adapter = self._create_low_code_adapter(manifest=stream_read_request_body.manifest)\n schema_inferrer = SchemaInferrer()\n\n if stream_read_request_body.record_limit is None:\n record_limit = self.max_record_limit\n else:\n record_limit = min(stream_read_request_body.record_limit, self.max_record_limit)\n\n slices = []\n log_messages = []\n try:\n for message_group in self._get_message_groups(\n adapter.read_stream(stream_read_request_body.stream, stream_read_request_body.config),\n schema_inferrer,\n record_limit,\n ):\n if isinstance(message_group, AirbyteLogMessage):\n log_messages.append({\"message\": message_group.message})\n else:\n slices.append(message_group)\n except Exception as error:\n # TODO: We're temporarily using FastAPI's default exception model. Ideally we should use exceptions defined in the OpenAPI spec\n self.logger.error(f\"Could not perform read with with error: {error.args[0]} - {self._get_stacktrace_as_string(error)}\")\n raise HTTPException(\n status_code=400,\n detail=f\"Could not perform read with with error: {error.args[0]}\",\n )\n\n return StreamRead(\n logs=log_messages,\n slices=slices,\n test_read_limit_reached=self._has_reached_limit(slices),\n inferred_schema=schema_inferrer.get_stream_schema(stream_read_request_body.stream)\n )", "def test_fast_reader():\n text = \"a b c\\n1 2 3\\n4 5 6\"\n with pytest.raises(ParameterError): # C reader can't handle regex comment\n ascii.read(text, format=\"fast_basic\", guess=False, comment=\"##\")\n\n # Enable multiprocessing and the fast converter\n try:\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": True, \"use_fast_converter\": True},\n )\n except NotImplementedError:\n # Might get this on Windows, try without parallel...\n if os.name == \"nt\":\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": False, \"use_fast_converter\": True},\n )\n else:\n raise\n\n # Should raise an error if fast_reader has an invalid key\n with pytest.raises(FastOptionsError):\n ascii.read(text, format=\"fast_basic\", guess=False, fast_reader={\"foo\": True})\n\n # Use the slow reader instead\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\", fast_reader=False)\n # Will try the slow reader afterwards by default\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\")", "def read_data(stream, writer_schema, reader_schema=None):\n if isinstance(writer_schema, dict):\n record_type = writer_schema['type']\n elif isinstance(writer_schema, list):\n record_type = 'union'\n else:\n record_type = writer_schema\n\n if reader_schema and record_type in AVRO_TYPES:\n if not match_schemas(writer_schema, reader_schema):\n raise SchemaResolutionError(\n 'Schema mismatch: %s does not match %s'\n % (writer_schema, reader_schema)\n )\n\n try:\n return READERS[record_type](stream, writer_schema, reader_schema)\n except SchemaResolutionError:\n raise\n except Exception as exc:\n raise ReadError(\n 'Failed to read %r type' % record_type, exc\n )", "def read_string(stream, writer_schema=None, reader_schema=None): # noqa\n 
size = read_long(stream)\n if reader_schema == 'bytes':\n # Schema Resolution: promote to byte string\n return stream.read(size)\n else:\n return stream.read(size).decode('utf-8')", "def checked_read(in_stream, length, allow_eof=False):\n\n bytes = in_stream.read(length)\n if allow_eof and bytes == '':\n return bytes\n if len(bytes) != length:\n raise IOError(MSG_INCOMPLETE_READ)\n return bytes", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def read_stream(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):\n reader = _lancaster.Reader(schema)\n buf = stream.read(buffer_size)\n remainder = b''\n while len(buf) > 0:\n values, n = reader.read_seq(buf)\n yield from values\n remainder = buf[n:]\n buf = stream.read(buffer_size)\n if len(buf) > 0 and len(remainder) > 0:\n ba = bytearray()\n ba.extend(remainder)\n ba.extend(buf)\n buf = memoryview(ba).tobytes()\n if len(remainder) > 0:\n raise EOFError('{} bytes remaining but could not continue reading '\n 'from stream'.format(len(remainder)))", "def try_read(self):\r\n pos = self._fp.tell()\r\n try:\r\n return self.read()\r\n except RecordIO.PrematureEndOfStream as e:\r\n log.debug('Got premature end of stream [%s], skipping - %s' % (self._fp.name, e))\r\n self._fp.seek(pos)\r\n return None", "def reader(name, version=None, mimetype=None):\n\treturn _data_processor('read', name, version, mimetype)", "def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))", "def read_fixed(stream, writer_schema, reader_schema=None):\n return stream.read(writer_schema['size'])", "def read(reader: BitStreamReader, _index: int) -> BitBuffer:\n\n return reader.readBitBuffer()", "def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)" ]
[ "0.6057923", "0.6019735", "0.5995071", "0.59576786", "0.5762868", "0.5712855", "0.5576423", "0.544311", "0.54237616", "0.54231197", "0.5403411", "0.53777176", "0.53776944", "0.52933985", "0.5291875", "0.52114725", "0.5191779", "0.5167444", "0.514691", "0.51206833", "0.510413", "0.50981784", "0.50818163", "0.5051923", "0.50517976", "0.503986", "0.50289977", "0.50232327", "0.50008893", "0.48906842" ]
0.6454055
0
Read a file with a given reader If the reader is not specified, an attempt to guess at an appropriate reader is made. A NoReaderFound error is raised if no such appropriate reader is found.
def read_file( file_path: Union[str, pathlib.Path], reader_name: Optional[str] = None, **reader_args: Any, ) -> Reader: with open(file_path, mode="rb") as input_stream: return read_stream(input_stream, reader_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(filePath, reader='infer'):\n if isinstance(reader, str):\n if reader == 'infer':\n loader = inferReader(filePath)\n else:\n if reader in READERS:\n loader = READERS[reader]\n else:\n raise SerpentToolsException(\n 'Reader type {} not supported'.format(reader)\n )\n else:\n assert callable(reader), (\n 'Reader {} is not callable'.format(str(reader)))\n loader = reader\n returnedFromLoader = loader(filePath)\n returnedFromLoader.read()\n return returnedFromLoader", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)", "def read(filename, file_format=None, **kwargs):\n if not isinstance(filename, str):\n raise TypeError()\n if not (file_format is None or file_format in {\"tough\", \"json\"}):\n raise ValueError()\n\n fmt = (\n file_format\n if file_format\n else filetype_from_filename(filename, _extension_to_filetype)\n )\n fmt = fmt if fmt else \"tough\"\n\n return _reader_map[fmt](filename, **kwargs)", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def readerForFile(filename, encoding, options):\n ret = libxml2mod.xmlReaderForFile(filename, encoding, options)\n if ret is None:raise treeError('xmlReaderForFile() failed')\n return xmlTextReader(_obj=ret)", "def reader_from_file(load_dir: str, **kwargs):\n shared_resources = create_shared_resources()\n shared_resources.load(os.path.join(load_dir, \"shared_resources\"))\n if kwargs:\n shared_resources.config.update(kwargs)\n reader = readers[shared_resources.config[\"reader\"]](shared_resources)\n reader.load_and_setup_modules(load_dir)\n return reader", "def _resolve_reader(self):\n self.fh = self.path.fs.open(self.path, 'rU')\n self.resolved = csv.reader(self.fh, delimiter=self.delimiter)", "def read(self, location, **user_options):\n\n # Base the options off a copy to leave the Reader options uneffected.\n options = self.options.copy()\n options.update(user_options)\n\n # The directory option allows users to specify file locations relative\n # to a location other than the present working 
directory by joining the\n # location with the directory of their choice.\n if options.directory:\n location = os.path.join(options.directory, location)\n\n # When passed a directory as the location, the Reader recursively builds\n # a list of replays to return using the utils.get_files function. This\n # function respects the following arguments:\n # * depth: The maximum depth to traverse. Defaults to unlimited (-1)\n # * follow_symlinks: Boolean for following symlinks. Defaults to True\n # * exclude_dirs: A list of directory names to skip while recursing\n # * incldue_regex: A regular expression rule which all returned file\n # names must match. Defaults to None\n #\n replays, files = list(), utils.get_files(location, **options)\n\n # If no files are found, it could be for a variety of reasons\n # raise a NoMatchingFilesError to alert them to the situation\n if not files:\n raise exceptions.NoMatchingFilesError()\n\n for location in files:\n if options.verbose: print \"Reading: %s\" % location\n\n with open(location, 'rb') as replay_file:\n replays.append(self.make_replay(replay_file, **options))\n\n return replays", "def _read_file(self, options, datas):\n self.ensure_one()\n # guess mimetype from file content\n mimetype = guess_mimetype(datas)\n (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))\n if handler:\n try:\n return getattr(self, '_read_' + file_extension)(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %d) using guessed mimetype %s\", self.datas_fname or '<unknown>', self.id, mimetype)\n # try reading with user-provided mimetype\n (file_extension, handler, req) = FILE_TYPE_DICT.get(self.type, (None, None, None))\n if handler:\n try:\n return getattr(self, '_read_' + file_extension)(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %d) using user-provided mimetype %s\", self.datas_fname or '<unknown>', self.id, self.type)\n # fallback on file extensions as mime types can be unreliable (e.g.\n # software setting incorrect mime types, or non-installed software\n # leading to browser not sending mime types)\n if self.datas_fname:\n p, ext = os.path.splitext(self.datas_fname)\n if ext in EXTENSIONS:\n try:\n return getattr(self, '_read_' + ext[1:])(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %s) using file extension\", self.datas_fname, self.id)\n if req:\n raise ImportError(_(\"Unable to load \\\"{extension}\\\" file: requires Python module \\\"{modname}\\\"\").format(extension=file_extension, modname=req))\n raise ValueError(_(\"Unsupported file format \\\"{}\\\", import only supports CSV, ODS, XLS and XLSX\").format(self.type))", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def readFile(subdir, name, errorOnNonexistent=False, default=None):\n\n fname = _getFilename(subdir, name)\n # Note: this would probably look more natural as:\n # if not os.path.exists(fname):\n # # ... file does not exist ...\n # else:\n # try:\n # with open(fname) as f:\n # # ... read file ...\n # except:\n # # ... assume file was malformed ...\n # but there's technically a race condition in the above: the file could be\n # removed after the os.path.exists() check and before the open(fname). 
This\n # isn't going to matter in practice, but on principle I've coded it in a\n # different way which I _think_ avoids that race condition.\n #\n # Technically fileExists is really more like \"file is a regular file and we\n # have permission to read it\", but the point is that if we can't read it\n # and errorOnNonexistent is False, then we want to return the default value\n # rather than error.\n fileExists = False\n try:\n with open(fname, \"r\") as f:\n fileExists = True\n return json.load(f)\n except:\n if not fileExists and not errorOnNonexistent:\n return default\n else:\n raise", "def read(self, args):\n assert self.exists(args=args)\n file_path = self.path(args)\n file_str = autofile.read_file(file_path)\n file_dat = self.reader_(file_str)\n assert self.checker_(file_dat)\n return file_dat", "def load_input_reader(input_params):\n if \"abstract\" in input_params:\n driver_name = input_params[\"abstract\"][\"format\"]\n elif \"path\" in input_params:\n input_file = input_params[\"path\"]\n driver_name = driver_from_file(input_file)\n else:\n raise errors.MapcheteDriverError(\n \"invalid input parameters %s\" % input_params)\n if driver_name not in available_input_formats():\n raise errors.MapcheteDriverError(\n \"driver %s not found in %s\" % (\n driver_name, available_input_formats())\n )\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n # instanciate dummy input reader to read metadata\n input_reader = v.load().InputData.__new__(\n v.load().InputData, input_params)\n if input_reader.METADATA[\"driver_name\"] == driver_name:\n return v.load().InputData(input_params)\n except (AttributeError, errors.MapcheteConfigError):\n pass\n raise errors.MapcheteDriverError(\n \"no loader for driver '%s' could be found.\" % driver_name)", "def load_reader(path):\n if path[-4:] != '.pkl':\n path+='.pkl'\n with open(path,\"r+b\") as f:\n log(\"Loading reader from {}\".format(path))\n r = pickle.load(f)\n return r", "def _get_reader_fn(self, reader, reader_method=None, path=None) -> Callable:\n if reader_method is None and path is None:\n raise ExecutionEngineError(\n \"Unable to determine spark reader function without reader_method or path\"\n )\n\n if reader_method is None:\n reader_method = self.guess_reader_method_from_path(path=path)\n\n reader_method_op: str = reader_method.lower()\n try:\n if reader_method_op == \"delta\":\n return reader.format(reader_method_op).load\n return getattr(reader, reader_method_op)\n except AttributeError:\n raise ExecutionEngineError(\n f\"Unable to find reader_method {reader_method} in spark.\",\n )", "def get_reader_fn(input_fp=None):\n if input_fp is None:\n return OdpsTableReader if \"PAI\" in tf.__version__ else CSVReader\n\n if \"odps://\" in input_fp:\n return OdpsTableReader\n else:\n return CSVReader", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def guess_reader_method_from_path(path: str):\n path = path.lower()\n if path.endswith(\".csv\") or path.endswith(\".tsv\"):\n return \"csv\"\n elif (\n path.endswith(\".parquet\") or path.endswith(\".parq\") or path.endswith(\".pqt\")\n ):\n return \"parquet\"\n\n 
raise ExecutionEngineError(\n f\"Unable to determine reader method from path: {path}\"\n )", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def get_reader(fname):\n\n if fname == \"-\":\n fh = sys.stdin\n else:\n fh = open(fname, \"r\")\n \n rdr = csv.reader(fh, dialect=\"psv\")\n return (rdr, fh)", "def read(self, file_name):\n msg = \"ReaderWriterBase::read called!\"\n raise NotImplementedError(msg)", "def read_file(path, source):\n if source == 'srim':\n return read_srim(path)\n elif source == 'astar':\n return read_astar(path)\n else:\n raise ValueError('Unknown data source {}'.format(source))", "def read_file(self) -> misc_.file_readers.FileReaderHandler:\n\t\treturn self._read_file", "def readFromFile(filename):\n raise NotImplementedError", "def read(self, filename=None):\n\t\tif filename is None:\n\t\t\tif hasattr(self, 'filename'):\n\t\t\t\tfilename = os.path.join(self.path, self.filename)\n\t\t\telse:\n\t\t\t\traise Exception, 'no filename given!'\n\t\tif os.path.splitext(filename)[1] in self.extensions and os.path.exists(filename):\n\t\t\tfor line in open(filename).readlines():\n\t\t\t\tself.read_line(line)" ]
[ "0.73507124", "0.6649994", "0.6541662", "0.6465156", "0.6096228", "0.5905837", "0.5818582", "0.5808917", "0.57055247", "0.5697342", "0.5619084", "0.5508928", "0.5467147", "0.5462562", "0.5446384", "0.5420934", "0.5397053", "0.5380353", "0.53490704", "0.5324267", "0.53047097", "0.5296706", "0.52854663", "0.52788246", "0.5269713", "0.52487534", "0.524558", "0.5211467", "0.5204139", "0.5189624" ]
0.7362303
0
Test pointwise arithmetic with stencil offsets across two functions in indexed expression format
def test_indexed_stencil(self, expr, result): j, l = dimify('j l') a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base fa = a.function b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base fb = b.function eqn = eval(expr) Operator(eqn)(fa, fb) assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def position_op(x, wfunc):\n return x*wfunc", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_elemwise1():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32') + 0.5, 'a')\r\n b = tensor.fmatrix()\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],\r\n mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def test_flat(self, expr, result, mode):\n i, j = dimify('i j')\n a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)\n b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 
1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 
'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_grad_binary(func, motion, optimized, preserve_result, a, b):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, b)", "def test_coord_preceding_fs(self):", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_distance_indices(self):\n s1 = self.RNA(\"AUGC\")\n s2 = self.RNA(\"AAGC\")\n\n def f(x, y):\n if x == 2 or y == 2:\n return 10\n return 0\n\n self.assertEqual(s1.distance(s2, f, use_indices=True), 20)", "def test_directly_indexed_expression(self, fa, ti0, t0, exprs):\n eqs = EVAL(exprs, ti0.base, t0)\n op = Operator(eqs, dse='noop', dle='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs\n assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = 
numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_operators_functions_unavailable_for_geography(self):\n z = Zipcode.objects.get(code=\"77002\")\n point_field = \"%s.%s::geometry\" % (\n connection.ops.quote_name(City._meta.db_table),\n connection.ops.quote_name(\"point\"),\n )\n # ST_Within.\n qs = City.objects.filter(point__within=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"ST_Within({point_field}\", ctx.captured_queries[0][\"sql\"])\n # @ operator.\n qs = City.objects.filter(point__contained=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} @\", ctx.captured_queries[0][\"sql\"])\n # ~= operator.\n htown = City.objects.get(name=\"Houston\")\n qs = City.objects.filter(point__exact=htown.point)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} ~=\", ctx.captured_queries[0][\"sql\"])", "def tes_mod(self):\r\n x, y = ints('xy')\r\n fn = gof.DualLinker().accept(FunctionGraph([x,y], [x%y])).make_function()\r\n for a,b in ((0,1), (1,1), (0,-1), (1,-1), (-1,-1),\r\n (1,2), (-1,2), (1,-2), (-1,-2),\r\n (5,3), (-5,3), (5,-3), (-5,-3)\r\n ):\r\n self.assertTrue(fn(a,b) == a%b, (a,))", "def test_verify():\n Lx = 10; Ly = 10; c = 1.0\n\n def I(x, y):\n return exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)\n def f(x, y, t):\n return sin(2*x) + y\n def bc(x, y, t):\n return sin(t)\n\n # use string formulas instead so also weave can be tested:\n # (need to transfer globals() so that vectorized versions work)\n I = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 - pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('sin(2*x) + y',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('sin(t)',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n\n #nx = 15; ny = 10; tstop = 2\n nx = 4; ny = 3; tstop = 16\n verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop)", "def test_ex_2_5(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n program_reg_allocation = RegisterAllocation()\n program_instrs = compiler.compile_program(program, program_reg_allocation)\n\n wam = WAM()\n wam.execute(query_instrs)\n wam.execute(program_instrs)\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def intersection(x, y, f, p):", "def test_pow_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = x**3 + y**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), 0.0)\n 
assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2), 9.0)\n assert equals(f.derivative_at((y, y), {x: 1.5, y:2.5}, order=2), 15.0)\n f = (x-y)**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n -6.0*(1.5-2.5))", "def test_ex_2_3(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n aW = wam.deref_reg(5)\n aZ = wam.deref_reg(2)\n wam.execute(self.fig_2_4_instrs)\n aX = wam.deref_reg(5)\n aY = wam.deref_reg(4)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def structured_pow(x, y):\r\n # see decorator for function body\r", "def test_get_functions(sersic_2d_image,segm_and_cat):\n cat, segm, segm_deblend = segm_and_cat\n\n base_source = cat[0]\n\n source = base_source\n\n assert pf.get_source_position(source) == (base_source.maxval_xindex, base_source.maxval_yindex)\n assert pf.get_source_elong(source) == base_source.elongation.value\n assert pf.get_source_ellip(source) == base_source.ellipticity.value\n assert pf.get_source_theta(source) == base_source.orientation.to('rad').value\n\n x0, y0 = pf.get_source_position(source)\n ellip, theta = pf.get_source_ellip(source), pf.get_source_theta(source)\n\n assert np.round(pf.get_amplitude_at_r(200, sersic_2d_image, x0, y0 , ellip, theta), 6) == 0.036798", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"", "def pyelemfunctions():\n for elemid in unique(top.idpyelem[:top.nppyelem]):\n ip = (top.idpyelem[:top.nppyelem] == elemid)\n x = top.xpyelem[:top.nppyelem][ip]\n y = top.ypyelem[:top.nppyelem][ip]\n z = top.zpyelem[:top.nppyelem][ip]\n # --- The conversion to int is needed since a numpy.int64 is different than an int.\n (ex,ey,ez,bx,by,bz) = pyelemfunctionsdict[int(elemid)](x,y,z)\n top.expyelem[:top.nppyelem][ip] = ex\n top.eypyelem[:top.nppyelem][ip] = ey\n top.ezpyelem[:top.nppyelem][ip] = ez\n top.bxpyelem[:top.nppyelem][ip] = bx\n top.bypyelem[:top.nppyelem][ip] = by\n top.bzpyelem[:top.nppyelem][ip] = bz", "def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)" ]
[ "0.54499704", "0.54236096", "0.54200417", "0.54183537", "0.53868484", "0.5386813", "0.5386671", "0.53676295", "0.53601813", "0.5349417", "0.53437924", "0.53436583", "0.5304866", "0.53009784", "0.5292884", "0.52877325", "0.5283028", "0.5282718", "0.5279577", "0.52767044", "0.5276021", "0.5260619", "0.5257482", "0.52488494", "0.52344966", "0.52331054", "0.5233018", "0.5230695", "0.52179277", "0.5217522" ]
0.70558316
0
Test pointwise arithmetic with stencil offsets across a single function with buffering dimension in indexed expression format
def test_indexed_buffered(self, expr, result): i, j, l = dimify('i j l') a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base fa = a.function eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 
'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_elemwise1():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32') + 0.5, 'a')\r\n b = tensor.fmatrix()\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],\r\n mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j 
in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def local_func(f, t, x, w):\n x_func = np.zeros_like(t, dtype='f')\n for i, jd in enumerate(t.jd):\n sel = (t.jd >= (jd - w)) & (t.jd <= (jd + w))\n x_func[i] = f(x[sel])\n return x_func", "def test_flat(self, expr, result, mode):\n i, j = dimify('i j')\n a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)\n b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 
'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def position_op(x, wfunc):\n return x*wfunc", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_elemwise2():\r\n rng = numpy.random.RandomState(int(time.time()))\r\n shape = (3, 5)\r\n for pattern in [(0, 1), (1, 0)]:\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), name=None)\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n 
dtype='float32'), 'a')\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *\r\n tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32'))", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def test_2d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(point):\n x, y = point\n return [x**2, y]\n \n a = func((1, 1))\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, [1**2, 1])\n \n a = func((2, 1))\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, [2**2, 1])\n \n a = func((1, 2))\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, [1**2, 2])\n \n a = func((2, 2))\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, [2**2, 2])\n \n a = func((1.5, 1.5))\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_var_idx_in_modes(self, arr, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int array vars =\\n{}\\nMZgate(0, 1) | [vars[0], vars[1], vars[2]]\".format(arr)\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [1, 2, 3]}\n ]", "def test_1d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(x):\n return [x**2, x]\n \n a = func(1)\n self.assertEqual(len(self.storage), 1)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(2)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [2**2, 2])\n \n a = func(1)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(1.5)\n self.assertEqual(len(self.storage), 2)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = 
mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / 
fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, 
numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_fortran_frontend_view_test_3():\n test_name = \"view3_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),bb(:,:,j+1))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble 
precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, n=10)\n assert (b[0, 0, 0] == 1)\n assert (b[0, 0, 1] == 43)", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def test052_2d_numerical_comparison_on_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test032_2d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n 
np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def evaluable_view(self, stencil, offset=0):\n return self._evaluable_view(stencil, self.arr, offset)", "def test_fortran_frontend_view_test_2():\n test_name = \"view2_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,c,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,cc,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),cc(:,:,j))\nk=2\n call viewlens(aa(:,:,k),bb(:,:,k),cc(:,:,k))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n c = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, cc=c, n=10)\n assert (c[0, 0, 0] == 43)\n assert (c[1, 1, 1] == 84)", "def test_elemwise3():\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fvector()\r\n new_val = (a + b).dimshuffle([2, 0, 3, 1])\r\n new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])\r\n f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(6), dtype='float32'))" ]
[ "0.70109046", "0.5731927", "0.5725207", "0.5534058", "0.5485873", "0.54508567", "0.5420271", "0.5402067", "0.53938526", "0.5361206", "0.53606236", "0.5359261", "0.5359202", "0.5320887", "0.5293664", "0.5291382", "0.5265721", "0.52385396", "0.523383", "0.52290106", "0.52282166", "0.52210116", "0.52201825", "0.52003807", "0.52002525", "0.51925", "0.5190279", "0.517582", "0.5171569", "0.5146713" ]
0.5903518
1
Test pointwise arithmetic with stencil offsets and open loop boundaries in indexed expression format
def test_indexed_open_loops(self, expr, result): i, j, l = dimify('i j l') pushed = [d.size for d in [j, l]] j.size = None l.size = None a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed fa = a.function fa.data[0, :, :] = 2. eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12) j.size, l.size = pushed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_directly_indexed_expression(self, fa, ti0, t0, exprs):\n eqs = EVAL(exprs, ti0.base, t0)\n op = Operator(eqs, dse='noop', dle='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs\n assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_var_idx_in_modes(self, arr, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int array vars =\\n{}\\nMZgate(0, 1) | [vars[0], vars[1], vars[2]]\".format(arr)\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [1, 2, 3]}\n ]", "def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, 
temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_current_density_boundaries(self):\n t, x_n, x_p = self.t, self.x_n_edge, self.x_p_edge\n\n current_param = self.model.param.current_with_time\n\n i_cell = self.param.process_symbol(current_param).evaluate(t=t)\n np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[0]), i_cell, decimal=2)\n np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[-1]), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[-1]), i_cell, decimal=3)\n np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[0]), 0, decimal=4)", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def coordination(c, stencil=nn_stencil):\n\n coordination = np.zeros_like(c, dtype=int)\n for dx, dy in stencil:\n tmp = np.array(c, dtype=bool, copy=True)\n if dx != 0:\n tmp = np.roll(tmp, dx, 0)\n if dy != 0:\n tmp = np.roll(tmp, dy, 1)\n coordination += tmp\n return coordination", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def test_get_debug_values_success():\r\n\r\n prev_value = config.compute_test_value\r\n for mode in ['ignore', 'warn', 'raise']:\r\n\r\n try:\r\n config.compute_test_value = mode\r\n\r\n x = T.vector()\r\n x.tag.test_value = 
numpy.zeros((4,), dtype=config.floatX)\r\n y = numpy.zeros((5, 5))\r\n\r\n iters = 0\r\n\r\n for x_val, y_val in op.get_debug_values(x, y):\r\n\r\n assert x_val.shape == (4,)\r\n assert y_val.shape == (5, 5)\r\n\r\n iters += 1\r\n\r\n assert iters == 1\r\n\r\n finally:\r\n config.compute_test_value = prev_value", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def test_conservation(self):\n t, x_n, x_s, x_p = self.t, self.x_n, self.x_s, self.x_p\n\n current_param = self.model.param.current_with_time\n\n i_cell = self.param.process_symbol(current_param).evaluate(t=t)\n for x in [x_n, x_s, x_p]:\n np.testing.assert_array_almost_equal(\n self.i_s(t, x) + self.i_e(t, x), i_cell, decimal=2\n )\n np.testing.assert_array_almost_equal(\n self.i_s(t, x_n), self.i_s_n(t, x_n), decimal=3\n )\n np.testing.assert_array_almost_equal(\n self.i_s(t, x_p), self.i_s_p(t, x_p), decimal=3\n )", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def test_outside_plus_inside(self):\n for region, bounds in load_region_bounds_dict().items():\n lon_bounds, lat_bounds = bounds\n for key in ['data01', 'ds_shift_lon', 'ds_rev_both', 'ds_irr_both']:\n outside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='outside')['PRECL']\n inside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='inside')['PRECL']\n outside_plus_inside = (np.nan_to_num(outside_data.values) +\n np.nan_to_num(inside_data.values))\n diff_from_input = outside_plus_inside - data_dict[key]['PRECL'].values\n assert np.abs(diff_from_input).max() == 0", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def unityTestChangeOfCoordinates(map, ClosedLoopData):\n TestResult = 1\n for i in range(0, ClosedLoopData.x.shape[0]):\n xdat = ClosedLoopData.x\n xglobdat = ClosedLoopData.x_glob\n\n s, ey, _, _ = map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5], xglobdat[i, 3])\n v1 = np.array([s, ey])\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n # print v1, v2, np.dot(v1 - v2, v1 - v2), np.dot(v3 - v4, v3 - v4)\n\n if np.dot(v3 - v4, v3 - v4) > 0.00000001:\n TestResult = 0\n print (\"ERROR\", v1, v2, v3, v4)\n pdb.set_trace()\n v1 = np.array(map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5]))\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n print (np.dot(v3 - v4, v3 - v4))\n pdb.set_trace()\n\n if TestResult == 1:\n print (\"Change of coordinates test passed!\")", "def evaluable_view(self, stencil, offset=0):\n return self._evaluable_view(stencil, self.arr, offset)", "def inner_perimeter(c, stencil=nn_stencil):\n\n return np.logical_and(c, coordination(c, stencil=stencil) < len(stencil))", "def _evaluate(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n 
self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + (1 - alpha) * beta * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * (1 - beta) * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1](w[c], x[c])\n + alpha * beta * self.wxInterpolators[i][j](w[c], x[c])\n )\n return f", "def check_ext(im, i, j):\n neighb = 0\n count = 0\n for a in range(8):\n if (im[i+relpos[a][0], j+relpos[a][1]] and (count == 0)):\n count += 1\n neighb += 1\n else:\n count = 0\n return (neighb < 2)" ]
[ "0.6950202", "0.58628595", "0.56040186", "0.5595086", "0.5519812", "0.54418904", "0.54108006", "0.533119", "0.53181374", "0.53098184", "0.5292402", "0.5292149", "0.5247795", "0.5246134", "0.523379", "0.5228938", "0.5216184", "0.51775336", "0.51391", "0.513825", "0.51371264", "0.5125601", "0.51237714", "0.5118783", "0.5106406", "0.5094798", "0.50782824", "0.50654495", "0.50590676", "0.5056554" ]
0.6362784
1
Test calltime symbols overrides with other symbols
def test_override_symbol(self): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.) a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.) op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1.data, np.zeros(shape) + 6)) assert(np.allclose(a2.data, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testSymbolHash(self):\n gScope = pykd.diaLoadPdb( str(target.module.pdb()) )\n symSet = set([ gScope[\"g_structTest\"], gScope[\"EnumWindowsProc1\"], gScope[\"g_structTest\"] ])\n self.assertEqual( 2, len(symSet) )\n self.assertTrue( gScope[\"g_structTest\"] in symSet )\n self.assertFalse( gScope[\"EnumWindowsProc2\"] in symSet )", "def FakeSymbol(*args, _op, **kwargs):\n return symbol.Custom(*args, _op=_op, op_type=\"_fake\", **kwargs)", "def test_fix_code_typical_code():\r\n\r\n pass", "def test_symbol(self, data, symbol_first, symbol_second):\n layer = Points(data)\n assert layer.symbol == \"disc\"\n\n layer.symbol = symbol_first\n assert layer.symbol == symbol_first\n\n layer = Points(data, symbol=symbol_first)\n assert layer.symbol == symbol_first\n\n layer.symbol = symbol_second\n assert layer.symbol == symbol_second", "def testCtor(self):\n try: pykd.DiaSymbol()\n except RuntimeError: pass", "def execute(self, symbol_table, test_mode=False):", "def execute(self, symbol_table, test_mode=False):", "def test_GetSymbolMapping_no_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\", \"\"]\n self.assertDictEqual({}, stack_utils.GetSymbolMapping(lines))", "def test_GetSymbolMapping_parameter_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo?q=hello at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def enable_named_call():\n global _use_named_call\n _use_named_call = True", "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)", "def stubFunc( *args, **keywords ):\n maya.cmds.dynamicLoad( library )\n # call the real function which has replaced us\n return maya.cmds.__dict__[command]( *args, **keywords )", "def test_patch_pci_switch(self):\n pass", "def run(self, *args, **kw):\n if kw.has_key('SYMROOT'):\n del kw['SYMROOT']\n super(TestGypBase, self).run(*args, **kw)", "def 
test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)", "def test_override_builtin(self):\n PyLoader.register(override_builtins=True)\n self.assertIs(PRIORITY_HOOKS['.py'], PyLoader)", "def test_stub(self):\n pass", "def test_patch_none():", "def test_patch_bios_unit(self):\n pass", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def test_010(self):\n caller = self.get_caller([SingleMethod])\n self.assertEqual(\"I have very little to say.\", caller())", "def __call__(fun_name):", "def setUp(self):\n self.Triton = TritonContext()\n self.Triton.setArchitecture(ARCH.X86_64)\n self.Triton.setMode(MODE.ONLY_ON_SYMBOLIZED, True)\n super(TestSymbolicEngineOnlySymbolized, self).setUp()", "def symbols(self):\n pass", "def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def compilation_test(interp, source):\r\n print '*** Compiling symbols from file: %s ***' % util.within_VCode(source)\r\n interp.cleanup_dictionary()\r\n interp.parse_symbols_from_file(source)\r\n print '\\n\\nParsed symbols are: '\r\n interp.print_symbols()\r\n print 'Unresolved abbreviations are:'\r\n unresolved = interp.peek_at_unresolved()\r\n sorted_unresolved = unresolved.keys()\r\n sorted_unresolved.sort()\r\n for an_abbreviation in sorted_unresolved:\r\n symbol_list = unresolved[an_abbreviation].keys()\r\n symbol_list.sort()\r\n print '\\'%s\\': appears in %s' % (an_abbreviation, str(symbol_list))\r\n \r\n print '\\n*** End of compilation test ***\\n'", "def assert_structural_equal_ignore_global_symbol(\n func1: PrimFunc,\n func2: PrimFunc,\n *args: Any,\n **kwargs: Any,\n) -> None:\n assert_structural_equal(\n func1.with_attr(\"global_symbol\", \"main\"),\n func2.with_attr(\"global_symbol\", \"main\"),\n *args,\n **kwargs,\n )", "def test_override_builtin_extension_without_explicit_flag(self):\n with self.assertRaises(ValueError):\n PyLoader.register()" ]
[ "0.6088717", "0.5891969", "0.586841", "0.57884955", "0.57511026", "0.5713668", "0.5713668", "0.57010543", "0.5664404", "0.5631575", "0.5563887", "0.55595315", "0.55191725", "0.5508196", "0.54970926", "0.54917103", "0.5480295", "0.54737866", "0.5444322", "0.54287255", "0.54046744", "0.54034895", "0.53810966", "0.5379911", "0.5378213", "0.5367286", "0.53602266", "0.53591675", "0.535334", "0.53306067" ]
0.58977205
1
Test calltime symbols overrides with numpy arrays
def test_override_array(self): i, j, k, l = dimify('i j k l') shape = tuple(d.size for d in (i, j, k, l)) a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = np.zeros(shape=shape, dtype=np.float32) + 3. a2 = np.zeros(shape=shape, dtype=np.float32) + 4. op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1, np.zeros(shape) + 6)) assert(np.allclose(a2, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_TimeArray_repr():", "def test_format_signature_numpy():", "def test_TimeArray_convert_unit():", "def test_TimeArray_copyflag():\r\n\r\n #These two should both generate a TimeArray, with one picosecond.\r\n #This one holds time_unit='s'\r\n t1 = ts.TimeArray(np.array([1], dtype=np.int64), copy=False)\r\n #This one holds time_unit='ps':\r\n t2 = ts.TimeArray(1, time_unit='ps')\r\n t3 = ts.TimeArray(t2, copy=False)\r\n npt.assert_equal(t1, t2)\r\n npt.assert_equal(t2.ctypes.data, t3.ctypes.data)", "def test_test_arraypointertype(self):\n input = \"\"\"\n void main () {\n float arr[3];\n arr[2]=1.5;\n foo(arr);\n arr[2] = foo(arr)[2] + 1.1;\n putFloatLn(arr[2]);\n }\n float[] foo(float x[]){\n x[2] = 5.1;\n return x;\n }\n \"\"\"\n expect = \"6.2\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,571))", "def test_isarray_gtiff(self):\n self.assertIsInstance(_test_array(landsat_gtiff), np.ndarray)", "def test_isarray_vrt(self):\n self.assertIsInstance(_test_array(landsat_vrt), np.ndarray)", "def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)", "def lookup_array(self, *args, **kwargs): # real signature unknown\n pass", "def lookup_array(self, *args, **kwargs): # real signature unknown\n pass", "def test_numpy_ops(self):\n\n arr = np.array([1, 2, 3])\n c = Column('a', arr)\n eq = c == arr\n assert np.all(eq)\n assert len(eq) == 3\n assert type(eq) == Column\n assert eq.dtype.str == '|b1'\n eq = arr == c\n assert np.all(eq)\n\n lt = c - 1 < arr\n assert np.all(lt)", "def test_Sobol_G_raises_error_if_values_not_numpy_array():\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n evaluate(x)", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test11(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), bcolz.carray(b)\n if self.vm == \"python\":\n cr = bcolz.eval(\"np.sin(c) + 2 * np.log(d) - 3\")\n elif self.vm == \"dask\":\n cr = bcolz.eval(\"da.sin(c) + 2 * da.log(d) - 3\")\n else:\n cr = bcolz.eval(\"sin(c) + 2 * log(d) - 3\")\n nr = np.sin(a) + 2 * np.log(b) - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_allclose(cr[:], nr, err_msg=\"eval does not work correctly\")", "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def test02(self):\n a = np.arange(3, self.N, 4)\n ac = bcolz.arange(3, self.N, 4, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_timearray_var_prod():\r\n a = ts.TimeArray(list(range(10)))\r\n npt.assert_raises(NotImplementedError, a.var)\r\n npt.assert_raises(NotImplementedError, a.prod)", "def test_equitability(self):\n c = array([5])\n self.assertFloatEqual(equitability(c), 0)\n c = array([5,5])\n self.assertFloatEqual(equitability(c), 1)\n c = array([1,1,1,1,0])\n 
self.assertEqual(equitability(c), 1)", "def test00(self):\n a = np.arange(self.N)\n ac = bcolz.arange(self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_operation_arg_array(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"float array A =\\n\\t1, 5\\nGaussian(means=A) | 0\\n\")\n assert np.all(bb.operations[0][\"kwargs\"][\"means\"] == np.array([[1, 5]]))", "def with_numpy(func):\r\n return func", "def _test1():\n sys.argv.append('--Numeric')\n from . import numpytools as N\n verify(N)\n sys.argv[-1] = '--numarray'\n reload(N)\n verify(N)\n sys.argv[-1] = '--numpy'\n reload(N)\n verify(N)", "def test_op_isub_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __call__(self, par_dict: dict) -> np.ndarray:", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def test01(self):\n a = np.arange(3, self.N)\n ac = bcolz.arange(3, self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test03(self):\n a = np.arange(self.N, dtype=\"i1\")\n ac = bcolz.arange(self.N, dtype=\"i1\", rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def numpy_extension():\n jsonpickle.ext.numpy.register_handlers()\n yield # control to the test function.\n jsonpickle.ext.numpy.unregister_handlers()", "def test06(self):\n dtype = np.dtype(\"object\")\n a = np.array([\"ale\", \"e\", \"aco\"], dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertEqual(ac.dtype, dtype)\n self.assertEqual(a.dtype, ac.dtype)\n assert_array_equal(a, ac, \"Arrays are not equal\")", "def test_array_spec_no_match():\n fcode = \"call hello()\"\n with pytest.raises(NoMatchError):\n Fortran2003.Ac_Spec(fcode)" ]
[ "0.6313198", "0.6234536", "0.61531353", "0.60441715", "0.5897211", "0.58689487", "0.5791092", "0.5729955", "0.5682435", "0.5682435", "0.5633471", "0.55964816", "0.55935395", "0.5578403", "0.5577986", "0.5554115", "0.5551436", "0.555043", "0.554579", "0.5534603", "0.55257624", "0.55253726", "0.55050606", "0.55000937", "0.5488189", "0.54770494", "0.54725534", "0.5459195", "0.5446116", "0.54443127" ]
0.6475489
0
Test that the dimension sizes are being inferred correctly
def test_dimension_size_infer(self, nt=100): i, j, k = dimify('i j k') shape = tuple([d.size for d in [i, j, k]]) a = DenseData(name='a', shape=shape).indexed b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed eqn = Eq(b[time, x, y, z], a[x, y, z]) op = Operator(eqn) _, op_dim_sizes = op.arguments() assert(op_dim_sizes[time.name] == nt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def dimensions():", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def test_reduce_dimensionality(base_bertopic, embeddings, shape):\n umap_embeddings = base_bertopic._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def test_reduce_dimensionality(embeddings, shape):\n model = BERTopic()\n umap_embeddings = model._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def getDimensions():", "def test_batch_size_pack_size():", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def dimension(self):", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 
2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def test_n_dim(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n hist0 = hg.Count()\n\n assert hist0.n_dim == 0\n assert hist1.n_dim == 1\n assert hist2.n_dim == 2\n assert hist3.n_dim == 3", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def test_point_within_dimensions_invalid_sizes():\n point = np.array([20, 20, 20])\n image_dimensions = np.array([100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)\n\n point = np.array([20, 20])\n image_dimensions = np.array([100, 100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def _automatic_dims(cls, dims, size):\n if dims is None:\n dims = size\n elif np.product(dims) != size:\n raise QiskitError(\"dimensions do not match size.\")\n if isinstance(dims, (int, np.integer)):\n num_qubits = int(np.log2(dims))\n if 2 ** num_qubits == size:\n return num_qubits * (2,)\n return (dims,)\n return tuple(dims)", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_vector_dimensions(self):\r\n # crear una lista 1-D (Horizontal, Entradas). 
\r\n Z = [1, 2, 3, 4, 5]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Notemos que las dimensiones de Z y W son diferentes.\r\n try:\r\n neuron = rhonn(W, Z)\r\n except ValueError as e:\r\n # Comprobamos que efectivamente hay un error en las dimensiones.\r\n self.assertEqual(type(e), ValueError)\r\n else:\r\n self.fail('El error no fue lanzado.')", "def test_ncols_gtiff_array(self):\n self.assertEqual(_test_array(landsat_gtiff).shape[2], 235)", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def _assert_same_size(outputs: TensorStruct, output_size: OutputSize):\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n for output, size in zip(flat_output, flat_output_size):\n if isinstance(size, torch.Size):\n if output[0].size() != size:\n raise ValueError('The output size does not matchthe required output_size')\n elif output[0].size()[-1] != size:\n raise ValueError('The output size does not match the required output_size')", "def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))", "def testQuestionThree(self):\n self.assertEqual(AnswerQuestionThree().shape, (10,), \"Question three's output is not one dimension.\") \n self.assertEqual(AnswerQuestionThree().size, 10, \"Question three's output is not 10 long.\")", "def test_get_dim(self):\n self.assertEqual(self.game.get_dim(), self.game._dim)", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def _assert_same_size(outputs, output_size):\n nest.assert_same_structure(outputs, output_size)\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n\n for (output, size) in zip(flat_output, flat_output_size):\n if isinstance(size, tf.TensorShape):\n if output.shape == size:\n pass\n elif output[0].shape != tf.TensorShape(size):\n raise ValueError(\n \"The output size does not match the the required output_size\")", "def test_get_dimension(self):\n\n v = Vector({ 'x': 1 })\n self.assertEqual(1, v.dimensions['x'])" ]
[ "0.76725674", "0.75539386", "0.74125654", "0.7359512", "0.73051524", "0.72323257", "0.7225344", "0.7185799", "0.71210706", "0.6941943", "0.68519884", "0.6851906", "0.68469083", "0.68195313", "0.6812142", "0.67658305", "0.6746414", "0.6741667", "0.6688384", "0.66588515", "0.66540855", "0.6652592", "0.6647712", "0.66447943", "0.6630445", "0.6628934", "0.6613684", "0.6611912", "0.6608618", "0.65892714" ]
0.76781756
0
Test Box with photon shooting. Particularly the flux of the final image.
def test_box_shoot(): rng = galsim.BaseDeviate(1234) obj = galsim.Box(width=1.3, height=2.4, flux=1.e4) im = galsim.Image(100,100, scale=1) im.setCenter(0,0) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "Box makePhot not equivalent to drawPhot" obj = galsim.Pixel(scale=9.3, flux=1.e4) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "Pixel makePhot not equivalent to drawPhot" obj = galsim.TopHat(radius=4.7, flux=1.e4) added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate()) print('obj.flux = ',obj.flux) print('added_flux = ',added_flux) print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max()) print('image flux = ',im.array.sum()) assert np.isclose(added_flux, obj.flux) assert np.isclose(im.array.sum(), obj.flux) photons2 = obj.makePhot(poisson_flux=False, rng=rng) assert photons2 == photons, "TopHat makePhot not equivalent to drawPhot"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_box():\n savedImg = galsim.fits.read(os.path.join(imgdir, \"box_1.fits\"))\n myImg = galsim.ImageF(savedImg.bounds, scale=0.2)\n myImg.setCenter(0,0)\n test_flux = 1.8\n\n pixel = galsim.Pixel(scale=1, flux=1)\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject Pixel disagrees with expected result\")\n np.testing.assert_array_equal(\n pixel.scale, 1,\n err_msg=\"Pixel scale returned wrong value\")\n\n # Check with default_params\n pixel = galsim.Pixel(scale=1, flux=1, gsparams=default_params)\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject Pixel with default_params disagrees with expected result\")\n pixel = galsim.Pixel(scale=1, flux=1, gsparams=galsim.GSParams())\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject Pixel with GSParams() disagrees with expected result\")\n\n # Use non-unity values.\n pixel = galsim.Pixel(flux=1.7, scale=2.3)\n gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n pixel2 = galsim.Pixel(flux=1.7, scale=2.3, gsparams=gsp)\n assert pixel2 != pixel\n assert pixel2 == pixel.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n\n # Test photon shooting.\n do_shoot(pixel,myImg,\"Pixel\")\n\n # Check picklability\n do_pickle(pixel, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(pixel)\n do_pickle(galsim.Pixel(1))\n\n # Check that non-square Box profiles work correctly\n scale = 0.2939 # Use a strange scale here to make sure that the centers of the pixels\n # never fall on the box edge, otherwise it gets a bit weird to know what\n # the correct SB value is for that pixel.\n im = galsim.ImageF(16,16, scale=scale)\n gsp = galsim.GSParams(maximum_fft_size = 30000)\n for (width,height) in [ (3,2), (1.7, 2.7), (2.2222, 3.1415) ]:\n box = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp)\n check_basic(box, \"Box with width,height = %f,%f\"%(width,height))\n do_shoot(box,im,\"Box with width,height = %f,%f\"%(width,height))\n if __name__ == '__main__':\n # These are slow because they require a pretty huge fft.\n # So only do them if running as main.\n do_kvalue(box,im,\"Box with width,height = %f,%f\"%(width,height))\n cen = galsim.PositionD(0, 0)\n np.testing.assert_equal(box.centroid, cen)\n np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux)\n np.testing.assert_almost_equal(box.flux, test_flux)\n np.testing.assert_almost_equal(box.xValue(cen), box.max_sb)\n np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.-0.001), box.max_sb)\n np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.+0.001), 0.)\n np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.-0.001), 0.)\n np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.+0.001), 0.)\n np.testing.assert_array_equal(\n box.width, width,\n err_msg=\"Box width returned wrong value\")\n np.testing.assert_array_equal(\n box.height, height,\n err_msg=\"Box height returned wrong value\")\n\n gsp2 = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n box2 = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp2)\n assert box2 != box\n assert box2 == box.withGSParams(gsp2)\n assert box2 != box.withGSParams(xvalue_accuracy=1.e-8, 
kvalue_accuracy=1.e-8)\n assert box2.withGSParams(maximum_fft_size=30000) == box.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n\n # Check picklability\n do_pickle(box, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(box)\n do_pickle(galsim.Box(1,1))\n\n # Check sheared boxes the same way\n box = galsim.Box(width=3, height=2, flux=test_flux, gsparams=gsp)\n box = box.shear(galsim.Shear(g1=0.2, g2=-0.3))\n check_basic(box, \"Sheared Box\", approx_maxsb=True)\n do_shoot(box,im, \"Sheared Box\")\n if __name__ == '__main__':\n do_kvalue(box,im, \"Sheared Box\")\n do_pickle(box, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(box)\n cen = galsim.PositionD(0, 0)\n np.testing.assert_equal(box.centroid, cen)\n np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux)\n np.testing.assert_almost_equal(box.flux, test_flux)\n np.testing.assert_almost_equal(box.xValue(cen), box.max_sb)\n\n # This is also a profile that may be convolved using real space convolution, so test that.\n if __name__ == '__main__':\n conv = galsim.Convolve(box, galsim.Pixel(scale=scale), real_space=True)\n check_basic(conv, \"Sheared Box convolved with pixel in real space\",\n approx_maxsb=True, scale=0.2)\n do_kvalue(conv,im, \"Sheared Box convolved with pixel in real space\")\n do_pickle(conv, lambda x: x.xValue(0.123,-0.456))\n do_pickle(conv)", "def shoot(self):\n self.assertIsInstance(gun(3).shoot(), 2)\n self.assertIsInstance(gun(10).shoot(), 9)", "def shoot(self):\n if self.gun_interface:\n self.gun_interface.prepare_fire()", "def shoot(self):\n e = self.energy()\n y = self.rap()\n sqrt_pt2_m2 = e / math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n m = self.mass()\n pt = math.sqrt( sqrt_pt2_m2**2 - m**2 )\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def execute(self, cast):\n bricks = cast[\"brick\"] # there's only one\n paddle = cast[\"paddle\"][0] # there's only one\n ball = cast[\"ball\"] [0]\n score = cast[\"score\"] [0]\n lives = cast[\"lives\"] [0]\n \n ball_v = ball.get_velocity()\n paddle_xy = paddle.get_position()\n ball_xy = ball.get_position()\n ball_vx = Point.get_x(ball_v) \n ball_vy = Point.get_y(ball_v)\n \n if Point.get_y(ball_xy) == 1:\n position = Point(ball_vx, 1)\n ball.set_velocity(position)\n \n if Point.get_x(ball_xy) <= 2:\n position = Point(1, ball_vy)\n ball.set_velocity(position)\n\n if Point.get_x(ball_xy) >= 78:\n position = Point(-1, ball_vy)\n ball.set_velocity(position)\n\n paddle_x = Point.get_x(paddle_xy)\n paddle_y = Point.get_y(paddle_xy)\n ball_x = Point.get_x(ball_xy)\n ball_y = Point.get_y(ball_xy)\n\n if ball_y == 18 and lives._lives == 0:\n print(\"\"\"\n ██╗░░░██╗░█████╗░██╗░░░██╗  ██╗░░░░░░█████╗░░██████╗███████╗  ██╗░░██╗\n ╚██╗░██╔╝██╔══██╗██║░░░██║  ██║░░░░░██╔══██╗██╔════╝██╔════╝  ╚═╝░██╔╝\n ░╚████╔╝░██║░░██║██║░░░██║  ██║░░░░░██║░░██║╚█████╗░█████╗░░  ░░░██╔╝░\n ░░╚██╔╝░░██║░░██║██║░░░██║  ██║░░░░░██║░░██║░╚═══██╗██╔══╝░░  ░░░╚██╗░\n ░░░██║░░░╚█████╔╝╚██████╔╝  ███████╗╚█████╔╝██████╔╝███████╗  ██╗░╚██╗\n ░░░╚═╝░░░░╚════╝░░╚═════╝░  ╚══════╝░╚════╝░╚═════╝░╚══════╝  ╚═╝░░╚═╝\"\"\")\n sys.exit()\n elif ball_y == 19 and lives._lives > 0:\n position = Point(ball_vx, -1)\n ball.set_velocity(position)\n lives._lives -= 1\n lives.set_text(f\"Lives: {lives._lives}\")\n\n for _ in range(1, 11):\n\n if paddle_x == ball_x and paddle_y -1 == ball_y or ball_vx == 3:\n if ball_vx == 1 or ball_vx == 2:\n ball_vx = random.randint(1,2)\n elif ball_vx == -1 or ball_vx == 
-2 or ball_vx == -3:\n ball_vx = random.randint(-2,-1)\n position = Point(ball_vx, -1)\n ball.set_velocity(position)\n paddle_x += 1\n \n i = 0\n\n for brick in bricks:\n position = brick.get_position()\n brick_x = Point.get_x(position)\n brick_y = Point.get_y(position)\n\n if brick_x == ball_x and brick_y == ball_y:\n bricks.pop(i)\n score._points +=1\n score.set_text(f\"Score: {score._points}\")\n if ball_vy == 1:\n ball_vy = -1\n elif ball_vy == -1:\n ball_vy = 1\n\n if ball_vx == 1:\n ball_velocity = Point(-1, ball_vy)\n else:\n ball_velocity = Point(1, ball_vy)\n ball.set_velocity(ball_velocity)\n\n i += 1", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass", "def shoot(self):\n pt = self.pt()\n assert pt >= 0\n m = self.mass()\n assert m >= 0\n sqrt_pt2_m2 = math.sqrt( pt**2 + m**2 )\n y = self.rap()\n e = sqrt_pt2_m2 * math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def start_shoot(self):\n shooter_state = String()\n shooter_state.data = \"shoot\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Shooting\")", "def boltshoot(self):\n if self.input.is_key_down('spacebar'):\n self.getWave().boltInit()", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def ballchange(self):\r\n self.picture+=1\r\n self.image=pong2.bballs[self.picture]\r\n if self.image==pong2.zeus:\r\n wow=games.Message(value=\"YOU NEED TO GET A LIFE!!!\", size=75, color=color.white, left=5, top=5, lifetime=10*games.screen.fps, after_death=None, is_collideable=False)\r\n games.screen.add(wow)", "def shoot(self):\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n theta = self.theta()\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * 
math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shoot_boolet(self):\n angle = self.angle\n for i in range(3):\n bullet = BulletAlienDos(self.main_game, shooter=self.shooter)\n bullet.vector[0] = 0\n bullet.vector[1] = 1\n bullet.normalized_vector = bullet.vector.normalize()\n bullet.normalized_vector = bullet.normalized_vector.rotate(angle)\n angle -= self.angle\n self.main_game.alien_bullets.add(bullet)", "def shoot(self):\n return self.bot_client.send_command(_Command.Shoot)", "def __call__(self):\n return self.shoot()", "def test_fluxes(self):\n\n t, x_n, x_p, r_n, r_p = (\n self.t,\n self.x_n,\n self.x_p,\n self.r_n_edge,\n self.r_p_edge,\n )\n if self.model.options[\"particle\"] == \"uniform profile\":\n # Fluxes are zero everywhere since the concentration is uniform\n np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)\n else:\n if self.operating_condition == \"discharge\":\n if self.model.options[\"particle\"] == \"quartic profile\":\n # quartic profile has a transient at the beginning where\n # the concentration \"rearranges\" giving flux of the opposite\n # sign, so ignore first three times\n np.testing.assert_array_less(0, self.N_s_n(t[3:], x_n, r_n[1:]))\n np.testing.assert_array_less(self.N_s_p(t[3:], x_p, r_p[1:]), 0)\n else:\n np.testing.assert_array_less(\n -1e-16, self.N_s_n(t[1:], x_n, r_n[1:])\n )\n np.testing.assert_array_less(self.N_s_p(t[1:], x_p, r_p[1:]), 1e-16)\n if self.operating_condition == \"charge\":\n np.testing.assert_array_less(self.N_s_n(t[1:], x_n, r_n[1:]), 1e-16)\n np.testing.assert_array_less(-1e-16, self.N_s_p(t[1:], x_p, r_p[1:]))\n if self.operating_condition == \"off\":\n np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)\n\n np.testing.assert_array_almost_equal(0, self.N_s_n(t, x_n, r_n[0]), decimal=4)\n np.testing.assert_array_almost_equal(0, self.N_s_p(t, x_p, r_p[0]), decimal=4)", "def shoot(self):\n theta = self.theta()\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shoot(self):\n if self.cool_down_counter == 0 and self.specialfire_state == False:\n BULLET_SOUND.play()\n bullet = Bullet(self.x+10, self.y, self.bullet_img)\n self.bullets.append(bullet)\n self.cool_down_counter = 1", "def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)", "def check_ball_on_target():\n\n pass", "def shoot(self):\n shots = Shooting(self.rect.centerx, self.rect.bottom)\n # Adding the shots to sprite lists created\n all_sprites_list.add(shots)\n shooting_list.add(shots)", "def shoot(self):\n if self.direction == 'left':\n 
self.__temp_bullet = Bullet(self.position, 60)\n elif self.direction == 'forward':\n self.__temp_bullet = Bullet(self.position, 90)\n else:\n self.__temp_bullet = Bullet(self.position, 120)\n global game\n game.sprites.add(self.__temp_bullet)\n game.bullets.add(self.__temp_bullet)\n game.fuel_bar.modify(-1)\n sounds['phaser'].play()", "def shoot(self):\r\n bullet = Bullet(self.rect.centerx, self.rect.top)\r\n ammo.add(bullet)", "def modify_box_coordinates(self, image, poles_detected):\n for window, poles in poles_detected.items():\n # Let's consider all poles detected on an image and modify their coordinates.\n # If only one pole's been detected, just widen the box 60% both sides\n if len(poles) == 1:\n new_left_boundary = int(poles[0].BB_left * 0.4)\n new_right_boundary = int(poles[0].BB_right * 1.6) if int(poles[0].BB_right * 1.6) <\\\n image.shape[1] else (image.shape[1] - 2)\n # Move upper border way up, often when a pole is close up many components do not get\n # included in the box, as a result they do not get found\n new_top_boundary = int(poles[0].BB_top * 0.1)\n new_bot_boundary = int(poles[0].BB_bottom * 1.1) if int(poles[0].BB_bottom * 1.1) <\\\n image.shape[0] else (image.shape[0] - 2)\n\n poles[0].update_object_coordinates(left=new_left_boundary,\n top=new_top_boundary,\n right=new_right_boundary,\n bottom=new_bot_boundary)\n else:\n for pole in poles:\n # If we've got 1+ poles on one frame or image, hence the shot was likely taken from\n # further distance.\n\n # TO DO: Overlapping check here. If BBs overlap and a component happens to be in between,\n # it will be detected twice\n\n new_left_boundary = int(pole.BB_left * 0.9)\n new_right_boundary = int(pole.BB_right * 1.1) if int(pole.BB_right * 1.1) < \\\n image.shape[1] else (image.shape[1] - 2)\n new_top_boundary = int(pole.BB_top * 0.5)\n new_bot_boundary = int(pole.BB_bottom * 1.1) if int(pole.BB_bottom * 1.1) < \\\n image.shape[0] else (image.shape[0] - 2)\n\n pole.update_object_coordinates(left=new_left_boundary,\n top=new_top_boundary,\n right=new_right_boundary,\n bottom=new_bot_boundary)", "def computeAndInsertBox(self,**kwargs):\n if self.predefined_box is None:\n self.mm.neglect()\n return\n (pose,new_frame) = self.baxter.frame.computeTransformation() \n if pose is None:\n self.mm.neglect()\n return\n \n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n else:\n self.baxter.frame.setTF(self.predefined_box+'_'+side,pose)\n self.baxter.frame.waitUntilFrameUpdate(self.predefined_box+\"_\"+side)\n self.baxter.scene.createPredefinedBox(self.predefined_box+\"_\"+side,self.predefined_box)\n if self.learning:\n self.appendToTask(\"import tf_helper \\n\")\n self.appendToTask(\"side='%s'\\n\"%(side))\n self.appendToTask(\"baxter.bb.predefined_box='%s'\\n\"%(self.predefined_box))\n self.appendToTask(\"pose = tf_helper.PS('%s',%s,%s)\\n\"%(FRAME_ORIGIN,list(pose.pose.position),list(pose.pose.orientation)))\n self.appendToTask(\"baxter.frame.setTF('%s_'+side,pose)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.frame.waitUntilFrameUpdate('%s_'+side)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.scene.createPredefinedBox(baxter.bb.predefined_box+'_'+side,baxter.bb.predefined_box)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n self.appendToTask(\"for drop_off in baxter.scene.boxes[baxter.bb.predefined_box][1].keys():\\n\"%())\n 
self.appendToTask(\" pose = tf_helper.PS('%s_'+side,%s,%s)\\n\"%(self.predefined_box,\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][0:3]\",\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][3:7]\"))\n self.appendToTask(\" baxter.frame.setTF(drop_off+'_'+side,pose)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n for drop_off in self.baxter.scene.boxes[self.predefined_box][1].keys():\n pose = PS(self.predefined_box+'_'+side,self.baxter.scene.boxes[self.predefined_box][1][drop_off][0:3],self.baxter.scene.boxes[self.predefined_box][1][drop_off][3:7])\n self.baxter.frame.setTF(drop_off+'_'+side,pose)\n self.mm.confirm()", "def bfm_shoot_movement(self):\n\n # Move the feed motor forward, wait for it to get caught into the flywheels, then come back\n # Currently waiting 1.1 seconds each direction of the bfm movement\n # 2.2 seconds total\n # So wait (ROF-2.2)/2 in each direction and hope that the LAX ball has fallen by then\n\n if self.drill_name is not None:\n time.sleep((self.rof-2.2)/2)\n self.bfm.move_forward()\n self.bfm.move_backward()\n if self.drill_name is not None:\n time.sleep((self.rof-2.2)/2)", "def test_photon_flux_conversion(self):\n init_wl = np.linspace(300, 500, num=10)\n init_spec = np.ones(init_wl.shape)\n\n test_spec_base = Spectrum(init_wl, init_spec, 'nm', is_photon_flux=False)\n spectrum = test_spec_base.get_spectrum('nm', to_photon_flux=True)\n\n expect_spec = init_spec / (sc.h * sc.c / (init_wl*1e-9))\n\n assert np.all(np.isclose(spectrum[1, :], expect_spec))", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]" ]
[ "0.6883019", "0.60247976", "0.5729917", "0.5662367", "0.5657805", "0.56022036", "0.5602087", "0.55716115", "0.5532589", "0.5502499", "0.5473831", "0.54660267", "0.54527724", "0.54423535", "0.5430135", "0.5401244", "0.5400176", "0.53518057", "0.53516215", "0.53498757", "0.5337529", "0.53322077", "0.5329076", "0.53124714", "0.5286162", "0.52783287", "0.5278177", "0.523128", "0.52156144", "0.52107227" ]
0.72881216
0
Decide whether to enter hotspot mode or wifi mode and then do so
def set_wifi_mode(args): pass """+ try: if args['mode'] == 'hotspot': logger.info('will enter hotspot mode') #TODO - Need to capture the line that contains interface [some lan id] and uncomment it. change_file_line(path.join('/etc', 'dhcpcd.conf'), interface_l1_res, 'interface {}\n'.format() return True if args['silent'] else 'Ok' if args['mode'] == 'wi-fi': logger.info('will enter wi-fi mode') return True if args['silent'] else 'Ok' else: logger.error('Unknown wi-fi mode: {}'.format(args['mode'])) return False if args['silent'] else 'ERROR' except: logger.error('Exception in set_wifi_mode: {}, {}'.format(exc_info()[0], exc_info()[1])) return False if args['silent'] else 'ERROR' """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cycle_wifi(mode=None):\n call(['ifdown', settings.WIFI_INTERFACE])\n if mode is not None:\n call(['iwconfig', settings.WIFI_INTERFACE, 'mode', mode])\n call(['ifup', settings.WIFI_INTERFACE])", "def switch_network(self,type = None):\n network_type = self.appconfig(type,\"Settings\")\n self.logger.debug(\"Switch network to %s:%s.\" % (type,network_type))\n if self.enter_settings(u\"More…\"):\n if self.device(text=\"Mobile networks\").exists:\n self.device(text=\"Mobile networks\").click()\n if self.device(text=\"Preferred network mode\").wait.exists(timeout=self.timeout):\n self.device(text=\"Preferred network mode\").click()\n if self.device(resourceId=\"android:id/buttonPanel\").wait.exists(timeout=self.timeout):\n self.device(text=network_type).click()\n print self._is_connected(type)\n self.back_to_home()", "def system(self, mode=None):\n if mode == System.AUTO:\n self.change_request[\"SystemSwitch\"] = System.AUTO\n elif mode == System.COOL:\n self.change_request[\"SystemSwitch\"] = System.COOL\n elif mode == System.HEAT:\n self.change_request[\"SystemSwitch\"] = System.HEAT\n elif mode == System.OFF:\n self.change_request[\"SystemSwitch\"] = System.OFF\n else:\n return False\n return self.change_request[\"SystemSwitch\"]", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "def _setup_wifi_ap(self):\n context = self._get_ap_context()\n try:\n check_output(['ifconfig', context['hostname']])\n logger.info('wifi ap {} already setup'.format(context['hostname']))\n return True\n except CalledProcessError:\n logger.info('Setting up virtual access point interface')\n call(['service', 'hostapd', 'stop'])\n call(['service', 'dnsmasq', 'stop'])\n\n self._write_system_template('/etc/dnsmasq.conf', 'access_point/dnsmasq.conf')\n self._write_system_template('/etc/hostapd/hostapd.conf', 'access_point/hostapd.conf', context)\n self._write_system_template('/etc/network/interfaces', 'access_point/interfaces.conf', context)\n self._write_system_template('/etc/default/hostapd', 'access_point/default_hostapd.conf', context)\n self._write_system_template('/etc/dhcpcd.conf', 'access_point/dhcpcd.conf', context)\n \n call(['systemctl', 'enable', 'hostapd', ])\n call(['systemctl', 'enable', 'dnsmasq', ])\n return True", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def set_monitor_mode(controller_name):\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"airmon-ng\", \"check\", \"kill\"])\n subprocess.run([\"iw\", wifi_name, \"set\", \"monitor\", \"none\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])", "def connect_type(word_list):\n if 'wlan0' in word_list or 'wlan1' in word_list:\n con_type = 'wifi'\n elif 'eth0' in word_list:\n con_type = 'ethernet'\n else:\n con_type = 'current'\n\n return con_type", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n 
self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def test_wifi_scanner_with_wifi_off(self):\n self.log.debug(\"Make sure wifi is off.\")\n wutils.wifi_toggle_state(self.dut, False)\n self.start_wifi_scanner_single_scan_expect_failure(\n self.default_scan_setting)\n self.log.debug(\"Turning wifi back on.\")\n wutils.wifi_toggle_state(self.dut, True)", "def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)", "def set_airplane_mode(self, action):\n\n is_action_performed = False\n is_airplane_mode_on_off_visible = False\n\n settings_more_button = \\\n 'self.android_locators.SETTINGS_MORE_BUTTON_ByXPATH'\n airplane_mode_on_off_toggle = \\\n 'self.android_locators.AIRPLANE_MODE_ON_OFF_ByID'\n\n if self.phone_info.phone_type == PhoneType.IOS:\n airplane_mode_on_off_toggle = \\\n 'self.ios_locators.AIRPLANE_MODE_ON_OFF_ByXPATH'\n no_sim_card_installed_msg = \\\n 'self.ios_locators.NO_SIM_CARD_INSTALLED_ByXPATH'\n no_sim_card_installed_ok_button = \\\n 'self.ios_locators.NO_SIM_CARD_INSTALLED_OK_BUTTON_ByXPATH'\n\n try:\n try:\n # verify that Airplane Mode Button is visible\n is_airplane_mode_on_off_visible = self.find_element(\n self.driver.appium_driver,\n airplane_mode_on_off_toggle, 0).is_displayed()\n 
except:\n logger.debug(\n \"Airplane Mode ON/OFF button is currently not visible\")\n\n if self.phone_info.phone_type == PhoneType.ANDROID:\n if not is_airplane_mode_on_off_visible:\n self.driver.appium_driver.close_app()\n self.driver.appium_driver.launch_app()\n time.sleep(1)\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, settings_more_button, 5)\n self.find_element(self.driver.appium_driver,\n settings_more_button, 1).click()\n # self.wait_till_element_to_be_visible(\n # self.driver.appium_driver, airplane_mode_on_off_toggle)\n logger.debug(\n \"Click on more button to make Airplane Mode visible\")\n\n airplane_mode_toggle_status = self.find_element(\n self.driver.appium_driver,\n airplane_mode_on_off_toggle).text\n if airplane_mode_toggle_status.upper() == action.upper():\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n else:\n self.find_element(self.driver.appium_driver,\n airplane_mode_on_off_toggle, 0).click()\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n self.driver.appium_driver.back()\n\n elif self.phone_info.phone_type == PhoneType.IOS:\n if not is_airplane_mode_on_off_visible:\n self.driver.appium_driver.close_app()\n self.driver.appium_driver.launch_app()\n time.sleep(1)\n airplane_mode_toggle_status = self.find_element(\n self.driver.appium_driver, airplane_mode_on_off_toggle,\n 0).text\n\n if action.upper() == \"ON\":\n if (airplane_mode_toggle_status == False) or \\\n (airplane_mode_toggle_status == '1'):\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n try:\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 3)\n self.find_element(self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 0).click()\n except:\n pass\n\n else:\n self.find_element(self.driver.appium_driver,\n airplane_mode_on_off_toggle,\n 0).click()\n try:\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 3)\n self.find_element(self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 0).click()\n except:\n pass\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n elif action.upper() == \"OFF\":\n if (airplane_mode_toggle_status == True) or \\\n (airplane_mode_toggle_status == '0'):\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n else:\n self.find_element(self.driver.appium_driver,\n airplane_mode_on_off_toggle,\n 0).click()\n time.sleep(1)\n\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n else:\n logger.debug(\n \"Only ON/OFF operation is possible with Airplane \"\n \"Mode. 
{} option is not permitted\".format(\n action))\n\n except Exception as e:\n logger.error(\n \"Exception occured while performing Airplane mode {} \".format(\n action))\n logger.error(repr(e))\n\n return is_action_performed", "def wifi_connect(self, vap: VirtualAPHostapd) -> bool:\n config_file_name = \"boardfarm_tmp.conf\"\n config_file_path = \"/tmp/{}\".format(config_file_name)\n\n # Create network configuration for SSID\n bssid = \"bssid={}\".format(vap.bssid)\n ssid = \"ssid=\\\"{}\\\"\".format(vap.get_ssid())\n key = \"psk=\\\"{}\\\"\".format(vap.get_psk())\n network_config = \"network={{\\n{}\\n{}\\n{}\\n}}\".format(bssid, ssid, key)\n # Clean up previous configuration\n self.sendline(\"rm -f \\\"{}\\\"\".format(config_file_path))\n self.expect(self.prompt)\n self.sendline(\"echo \\'{}\\' > \\\"{}\\\"\".format(network_config, config_file_path))\n self.expect(self.prompt)\n # Start wpa_supplicant with created configuration\n # Typical coommand on RPI: wpa_supplicant -B -c/tmp/temp.conf -iwlan0 -Dnl80211,wext\n self.sudo_sendline(\"wpa_supplicant -B -D{} -i{} -c{}\".format(\n self.driver_name, self.iface_wifi, config_file_path))\n self.expect(\"Successfully initialized wpa_supplicant\")\n return bool(self.match)", "def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()", "def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)", "def set_mode(vehicle, mode):\n util.log_info(\"Setting %s.\" % mode)\n shared.status['manual_mode'] = mode\n vehicle.mode = VehicleMode(mode)\n \n wait_count = 0 \n while True:\n time.sleep(.2)\n wait_count = wait_count + 1\n \n if vehicle.mode.name == mode :\n return True\n \n elif wait_count >= 45:\n util.log_warning(\"Unable to set %s. 
Assume link lost.\" % mode)\n shared.status['abort'] = True\n return False\n \n elif wait_count % 15 == 0 :\n util.log_warning(\"Retry setting %s\" % mode)\n vehicle.mode = VehicleMode(mode) # resend command", "def setup():\n print('Setup option is not working')\n quit()\n print('Long press the reset button until the blue Led is blinking quickly')\n print('Long press again until blinking slowly')\n print('Manually connect this device to the Wifi SSID named BlroadlinkProv')\n print('Press security mode (0 = none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)')\n print('Default:3')\n\n security = raw_input('Security mode:').lower()\n\n if security == 'none':\n security = 0\n elif security == 'wep':\n security = 1\n elif security == 'wpa1':\n security = 2\n elif (security == 'wpa2') or (security == ''):\n security = 3\n elif security == 'wpa1/2':\n security = 4\n security = int(security)\n if not(0 <= security <= 4):\n raise IndexError\n\n ssid = raw_input('SSID of your router :')\n if security != 0:\n password = raw_input('Password:')\n else:\n password = ''\n broadlink.setup(ssid, password, security)", "def connect(self):\n self.sta_if = network.WLAN(network.STA_IF)\n self.sta_if.active(False)\n sleep(1)\n self.sta_if.active(True)\n\n dbg(\"Interface active\")\n if self.check_ap(self.ssid):\n # connect to access point\n if not self.sta_if.isconnected():\n dbg('connecting to AP...')\n self.sta_if.active(True)\n self.sta_if.connect(self.ssid, self.key)\n while not self.sta_if.isconnected():\n machine.idle()\n # Do we need a timeout here?\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"WLAN already connected\")\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"Target SSID not found.\")\n reset(\"Could not connect to network - target SSID is not availble.\", HARD)", "def set_into_managed_mode(wifi_name):\n \n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"iwconfig\", wifi_name, \"mode\", \"managed\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])\n subprocess.run([\"service\", \"NetworkManager\", \"start\"])", "def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode", "def joinwifi():\n station = network.WLAN(network.STA_IF) # initiate a station mode\n\n if not station.isconnected():\n print('connecting to network:', ssid())\n station.active(True)\n station.connect(ssid(), password())\n \n\n while not station.isconnected():\n pass\n\n # deactivating access point mode\n ap = network.WLAN(network.AP_IF)\n ap.active(False)\n\n ip = station.ifconfig()[0]\n print('connected as:', ip)\n\n return ip", "def init_wlan_sta():\n\n print('WLAN: STA mode')\n wlan.init(mode=WLAN.STA)\n if not wlan.isconnected():\n wlan.connect(WLAN_SSID, auth=WLAN_AUTH, timeout=5000)\n while not wlan.isconnected():\n machine.idle() # save power while waiting", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def _force_on(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'on')", "def __auto_mode(self):\n while True:\n # establish connection\n while True:\n if self.android_api.is_connect():\n break\n self.android_api.init_bluetooth()\n time.sleep(0.05)\n\n\n if self.android_api.is_map_empty():\n if self.production:\n # self.print_msg(\"Waiting for map update\")\n time.sleep(0.05)\n continue\n else:\n self.__test_run_pipeline_style()\n else:\n self.print_msg(\"Updating map\")\n self.android_api.map_pop_n_exe()\n 
time.sleep(0.05)", "def check_enable_mode(self, *args, **kwargs):\n pass", "def checkWifi():\n try:\n subprocess.check_output(\"iwgetid\")\n return True\n except subprocess.CalledProcessError: # if not connected\n return False", "def do_internet(self, args):\n\n device = \\\n self._get_choice_(\"ahd\", self.ahds(), \"For which account holder device?\")\n device.toggle_internet()\n print(\"Device is now {}.\\n\".format([\"offline\", \"online\"][device.internet_connection]))", "def connect():\n \n print(\"*****Starting connection*****\")\n \n ssid = id_key.network_id #hidden ssid\n key = id_key.network_key #hidden key\n \n station = network.WLAN(network.STA_IF)\n \n if station.isconnected() == True:\n print(\"*****Already connected*****\")\n return\n \n station.active(True)\n station.connect(ssid, key)\n \n while station.isconnected() == False:\n pass\n \n print(\"*****Connection successful*****\")\n print(station.ifconfig())", "def turn_on(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Starting all torrents\")\n self._tm_client.api.start_torrents()\n elif self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission on\")\n self._tm_client.api.set_alt_speed_enabled(True)\n self._tm_client.api.update()" ]
[ "0.6191428", "0.60795987", "0.60477906", "0.5971737", "0.59445924", "0.58588445", "0.57119495", "0.570681", "0.57007", "0.5689363", "0.5664943", "0.5628451", "0.5603096", "0.5597446", "0.55574036", "0.55567616", "0.5528155", "0.55259633", "0.5525236", "0.5521546", "0.5498724", "0.54806113", "0.54776305", "0.54715896", "0.54531723", "0.5424893", "0.5418745", "0.54014814", "0.5397726", "0.5395694" ]
0.71384984
0
Function that returns true if a string contains a number
def hasNumbers(inputString): return any(char.isdigit() for char in inputString)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def has_number(any_string):\n return any(char.isdigit() for char in any_string)", "def has_num(text):\n return any(str.isdigit(c) for c in text)", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_some_number(mystring):\n # print(Bcolors.cyan + re.findall(r\".*\\\\(.*)\", inspect.stack()[0][1])[0] + \" --- \"\n # + inspect.stack()[0][3] + \"()\" + Bcolors.ENDC)\n mystring = str(mystring)\n mystring = re.sub(\",\", \".\", mystring)\n try:\n if float(mystring):\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def isnum(self, x):\n\n return x in '1234567890.-'", "def string_contains_digits(self, string):\n return bool(self.compiledDigitRegex.search(string))", "def _contains_number(text):\n return any((True for n in xrange(10) if str(n) in text))", "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number_repl_isnumeric(s):\n return s.replace('.', '', 1).isnumeric()", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def is_number(c):\n return '0' <= c <= '9'", "def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False", "def is_number(text):\n return text.lower() in AVRO_NUMBERS", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def is_number_repl_isdigit(s):\n return s.replace('.', '', 1).isdigit()", "def only_numbers(string):\n for character in string[:-1]:\n if not (character.isdigit() or character in (',', ' ')): \n return False\n return True", "def is_number(str):\n\n # Local constants\n\n # Local variabes\n\n #****** start is_number() ******#\n\n try:\n float(str)\n return True\n except ValueError:\n return False", "def isNumber(word):\n try:\n int(word)\n return True\n except ValueError:\n return False", "def is_digit_regex(s: str) -> bool:\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isNumeric(string, needHexPrefix):\n return (True)", "def is_number_regex(s):\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True" ]
[ "0.86861813", "0.83668447", "0.82383734", "0.7930955", "0.79304606", "0.7924505", "0.78930366", "0.78647584", "0.77653086", "0.7743263", "0.7714202", "0.7706238", "0.77058214", "0.77046204", "0.77044505", "0.7690902", "0.766317", "0.7639565", "0.7527607", "0.7527464", "0.75087285", "0.74870706", "0.74678373", "0.74541515", "0.7438179", "0.74299467", "0.74299467", "0.74299467", "0.7428709", "0.740295" ]
0.8393111
1
Function that saves the return_list from make_time to a file called yt_vids.txt Optional, default False
def save_link_time(return_list, path_to_download): # Opens a new file and writes lines to it and saves it at the spot provided with open(os.path.join(path_to_download, "yt_vids.txt"), "w") as w: w.write('\n'.join('{} {} {}'.format( x[0], x[1][0], x[1][1]) for x in return_list))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_save_list_to_file(self):\n task_list = TaskList()\n task1 = Task()\n output_file_path = self.testing_files[0]\n task1.set_name('Test Task One')\n task1.set_minutes(30)\n task1.set_notes('This is a great test task')\n task_list.add_task(task1)\n\n self.assert_equal(True, task_list.save_to_file(output_file_path))\n self.assert_equal(True, os.path.isfile(output_file_path))", "def save_file():\n generic = pull_list()\n result = list()\n i = 0\n while True:\n try:\n if generic[i].startswith('CVE'):\n cve_pattern = \"^CVE-\\d+-\\d+|^CVE-\\d+-[X]+\"\n header = re.findall(cve_pattern, generic[i])[0]\n i += 1\n notes = list()\n while not generic[i].startswith('CVE'):\n commit_pattern = \"http[s]?:\\/\\/.+commit\\/[\\S]+\"\n if re.search(commit_pattern, generic[i]):\n link = re.findall(commit_pattern, generic[i])\n notes.append(link[0])\n i += 1\n if notes != list():\n result.append(Data(header, notes))\n except IndexError:\n print('Finished')\n break\n return result", "def output_to_file(utilist, filepath=\"demo.csv\"):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"a\") as f:\n f.write(utilist + \"\\n\")", "def outList(self,list=False):\n txt = ''\n txt += 'echo \">>> list of expected files on output sandbox\"\\n'\n listOutFiles = []\n stdout = 'CMSSW_$NJob.stdout'\n stderr = 'CMSSW_$NJob.stderr'\n if len(self.output_file) <= 0:\n msg =\"WARNING: no output files name have been defined!!\\n\"\n msg+=\"\\tno output files will be reported back/staged\\n\"\n common.logger.info(msg)\n\n if (self.return_data == 1):\n for file in (self.output_file):\n listOutFiles.append(numberFile(file, '$OutUniqueID'))\n for file in (self.output_file_sandbox):\n listOutFiles.append(numberFile(file, '$NJob'))\n listOutFiles.append(stdout)\n listOutFiles.append(stderr)\n listOutFiles.append('Watchdog_$NJob.log.gz')\n\n txt += 'echo \"output files: '+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'filesToCheck=\"'+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'export filesToCheck\\n'\n taskinfo={}\n taskinfo['outfileBasename'] = self.output_file\n common._db.updateTask_(taskinfo)\n\n if list : return self.output_file\n return txt", "def export_time(z, name, save):\r\n list_dicts = []\r\n csv_columns = [\"initial size\", \"embed algo\", \"regression\", \"time\"]\r\n csv_file = os.path.join(\"..\", save, \"{} times_1.csv\".format(name))\r\n keys = list(z.keys())\r\n for key in keys:\r\n if \" + \" in key:\r\n se = z[key]\r\n initial_method = se.initial_method\r\n method = se.embedding_method\r\n for j in range(len(se.list_dicts_embedding)):\r\n data_results = {\"initial size\": se.initial_size[j], \"embed algo\": initial_method, \"regression\": method,\r\n \"time\": se.times[j]}\r\n list_dicts.append(data_results)\r\n else:\r\n data_results = {\"initial size\": \"\", \"embed algo\": key, \"regression\": \"\", \"time\": z[key][2]}\r\n list_dicts.append(data_results)\r\n with open(csv_file, 'w') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\r\n writer.writeheader()\r\n for data in list_dicts:\r\n writer.writerow(data)", "def write_list_file(output_file, clip_list_arr):\n list_file = output_file+'_clip_list.txt'\n print \"list_file: \", list_file\n f = open(list_file, 'w')\n for clip in clip_list_arr:\n line = 'file '+clip\n f.write(\"%s\\n\" % line)\n # Add in a divider movie between clips? 
(it could go here)\n f.close()\n # print 'list_file', list_file\n # print clip_list_arr\n\n return list_file", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. Check out \\\"output.txt\\\" file\")\n f_i.close()", "def write_to_file(self):\n name = datetime.today().date()\n with open(f'{name}.csv', 'w', newline='') as file_create:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_create, fieldnames=fieldnames)\n writer.writeheader()\n while datetime.today() < self.track_to:\n value_of_currency = PriceTracker.track_price()\n with open(f'{file_create.name}', 'a', newline='') as file_append:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_append, fieldnames=fieldnames)\n writer.writerow({'date': datetime.today().strftime(\"%H:%M:%S\"), 'value_in_pln': value_of_currency})\n\n self.check_min_value(tracked_price=value_of_currency)\n sleep(1)\n\n return self.generate_report(file_create.name)", "def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')", "def _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):\n\n if not os.path.exists(tracker.results_dir):\n print(\"create tracking result dir:\", tracker.results_dir)\n os.makedirs(tracker.results_dir)\n if seq.dataset in ['trackingnet', 'got10k']:\n if not os.path.exists(os.path.join(tracker.results_dir, seq.dataset)):\n os.makedirs(os.path.join(tracker.results_dir, seq.dataset))\n '''2021.1.5 create new folder for these two datasets'''\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n else:\n base_results_path = os.path.join(tracker.results_dir, seq.name)\n\n def save_bb(file, data):\n tracked_bb = np.array(data).astype(int)\n np.savetxt(file, tracked_bb, delimiter='\\t', fmt='%d')\n\n def save_time(file, data):\n exec_times = np.array(data).astype(float)\n np.savetxt(file, exec_times, delimiter='\\t', fmt='%f')\n\n def save_score(file, data):\n scores = np.array(data).astype(float)\n np.savetxt(file, scores, delimiter='\\t', fmt='%.2f')\n\n def _convert_dict(input_dict):\n data_dict = {}\n for elem in input_dict:\n for k, v in elem.items():\n if k in data_dict.keys():\n data_dict[k].append(v)\n else:\n data_dict[k] = [v, ]\n return data_dict\n\n for key, data in output.items():\n # If data is empty\n if not data:\n continue\n\n if key == 'target_bbox':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)\n save_bb(bbox_file, d)\n else:\n # Single-object mode\n bbox_file = '{}.txt'.format(base_results_path)\n save_bb(bbox_file, data)\n\n if key == 'all_boxes':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}_all_boxes.txt'.format(base_results_path, obj_id)\n save_bb(bbox_file, d)\n else:\n # Single-object mode\n bbox_file = '{}_all_boxes.txt'.format(base_results_path)\n save_bb(bbox_file, data)\n\n if key == 'all_scores':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = 
'{}_{}_all_scores.txt'.format(base_results_path, obj_id)\n save_score(bbox_file, d)\n else:\n # Single-object mode\n print(\"saving scores...\")\n bbox_file = '{}_all_scores.txt'.format(base_results_path)\n save_score(bbox_file, data)\n\n elif key == 'time':\n if isinstance(data[0], dict):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)\n save_time(timings_file, d)\n else:\n timings_file = '{}_time.txt'.format(base_results_path)\n save_time(timings_file, data)", "def save_list(todo_list, save_location):\r\n data_file_w = open(save_location,\r\n \"w\") # open the save file and clear the data from it\r\n data_file_w.write(\"Warning: The Todo-List Program will not be able to \"\r\n \"load this save file if it is incorrectly modified. \"\r\n \"Modify at your own risk. The structure is Entry \"\r\n \"Text, Entry Priority as a number, Entry Group as a \"\r\n \"number (Not Yet Utilized, but necessary), and Entry \"\r\n \"Visibility as a boolean, each on a separate line, a \"\r\n \"single line gap in between, and the \"\r\n \"very first line is skipped\\n\")\r\n for item in todo_list:\r\n data_file_w.write(\"{0}\\n{1}\\n{2}\\n{3}\\n\\n\".format(item.text,\r\n str(item.priority),\r\n str(item.group),\r\n str(item.visible)))\r\n data_file_w.close()\r\n return", "def save_time_spent(self):\n\n ratings_dir = Path(self.out_dir).resolve() / cfg.suffix_ratings_dir\n if not ratings_dir.exists():\n makedirs(ratings_dir, exist_ok=True)\n\n timer_file = ratings_dir / '{}_{}_{}'.format(\n self.vis_type, self.suffix, cfg.file_name_timer)\n\n lines = '\\n'.join(['{},{}'.format(sid, elapsed_time)\n for sid, elapsed_time in self.timer.items()])\n\n # saving to disk\n try:\n with open(timer_file, 'w') as tf:\n tf.write(lines)\n except:\n print('Unable to save timer info to disk -- printing them to log:')\n print(lines)\n raise IOError('Error in saving timer info to file!')\n\n # printing summary\n times = np.array(list(self.timer.values()))\n if len(times) < 10:\n print('\\n\\ntimes spent per subject in seconds:\\n{}'.format(lines))\n\n print('\\nMedian time per subject : {} seconds'.format(np.median(times)))\n print('\\t5th and 95th percentile of distribution of times spent '\n ': {} seconds'.format(np.nanpercentile(times, [5, 95])))", "def writeRawFCD():\n global vehId, vehIdDict\n vehIdDict = {}\n vehId = 0\n day = 0\n\n def getVehId(orgId):\n \"\"\"creates new vehicle id's which consists only numerics\"\"\"\n global vehId, vehIdDict\n value = vehIdDict.get(orgId, vehId)\n if value is vehId:\n vehIdDict[orgId] = vehId\n vehId = (vehId + 1) % 65500\n return value\n\n outputFile = open(path.FQrawFCD, 'w')\n\n for period, quota, vtypeDictR, taxiSum in generatePeriodQuotaSets():\n day += 86400\n # reset dict so that every taxi (even if the vehicle is chosen several\n # times) gets its own id\n vehIdDict = {}\n # dataset=0\n sortedKeys = vtypeDictR.keys()\n sortedKeys.sort()\n for timestep in sortedKeys:\n taxiList = vtypeDictR[timestep]\n for tup in taxiList: # all elements in this timestep\n # calc timestep ->for every period /quota set a new day\n time = timestep + day\n time = calcTime.getDateFromDepart(time)\n # dataset+=1\n # print ouptut\n # veh_id date (time to simDate+time) x (remove and\n # set comma new)\n outputFile.write(str(getVehId(tup[0])) + '\\t' + time + '\\t' + tup[3][0:2] + '.' + tup[3][2:7] + tup[3][8:] +\n # y (remove and set comma new)\n # status speed form m/s in km/h\n '\\t' + tup[4][0:2] + '.' 
+ tup[4][2:7] + tup[4][8:] + '\\t' + \"90\" + '\\t' + str(int(round(tup[2] * 3.6))) + '\\n')\n # print dataset, time\n print(vehId)\n outputFile.close()", "def create_checkfile(artist_list):\n with open(\"checkfile4.txt\", 'w') as checkfile: # we are creating new file named checkfile, hence method r for write\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks: # NOTE: we change below from 2.name back to 2.title\n print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format(new_artist, new_album, new_song),\n file=checkfile)\n\n # NOTE: python 2 does not allow print above where you have {0.name} etc\n # To run this pring format in python 2, you need to import print_function at the top of code using:\n # from __future__ import print_function", "def dump_to_file(final_results):\n\t#Add prefix result\n\tif final_results[\"Results\"][\"Test passed\"] == True:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_PASSED.json\"\n\telse:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_FAILED.json\"\n\twith open(ouput_filepath, 'w') as fp:\n\t\tjson.dump(final_results, fp)\n\treturn ouput_filepath", "def outw():\n # make the record string\n # ok, pack em up...\n outstr = \"\".join(outlist)\n print(outstr)\n print(len(outstr))\n # of = open(\"workfile\", \"w\")\n of = open(\"workfile\", \"a\")\n # of.write(\\n)\n of.write(outstr)\n of.close()", "def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))", "def write_vote_ids():\r\n\r\n open_file = open(os.path.join('data', 'vote_ids.txt'), 'w')\r\n open_file.write(\"\\n\".join(vote_set))", "def save(self, filename, format = \"text\"):\n #\n for time in self.mdvtc.keys():\n if format == \"csv\":\n save_filename = filename + str(int(time)) + \".csv\"\n elif format == \"text\":\n save_filename = filename + str(int(time)) + \".txt\"\n else:\n save_filename = filename + str(int(time)) + \".txt\"\n self.mdvtc[time].save(save_filename, format)", "def save_current_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\" # hard coding this due to CRON, but will remove later\n output_file = open(\"last_time_run\", \"w\")\n current_time_string = datetime.datetime.strftime(\n datetime.datetime.now(), \"%Y-%m-%d %H:%M:%S\"\n )\n output_file.write(current_time_string)\n print(current_time_string)\n output_file.close()", "def writeout(self):\n out_file = ''.join(['theta_w_t', str(self.t), '.dat'])\n data_list = [] \n\n for i in xrange(self.n_params): \n data_list.append( self.theta_t[i,:] ) \n\n data_list.append(self.w_t)\n\n np.savetxt(\n out_file, \n (np.vstack(np.array(data_list))).T, \n delimiter='\\t'\n )\n\n return None", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def save_to_csv(list_return, name, fieldnames):\n 
os.makedirs(os.path.dirname(name + '.csv'), exist_ok=True)\n with open(name + '.csv', 'w') as csvfile:\n csvfile.write(','.join(map(str, field_names)))\n csvfile.write('\\n')\n write = csv.writer(csvfile, delimiter=',')\n for x in range(0, len(list_return)):\n write.writerow(list_return[x])", "def main():\n parser = argparse.ArgumentParser(description='Saves MtGox trades for a time period')\n parser.add_argument('-s','--start', help='The start date in ' + input_dateformat + 'format', required=True)\n parser.add_argument('-e','--end', help='The end date'+ input_dateformat + 'format', required=True)\n args = vars(parser.parse_args())\n start=get_unixtime(args['start'], input_dateformat)\n end=get_unixtime(args['end'], input_dateformat)\n if end < start:\n print \"End timestamp must be later than start timestamp. Exiting\"\n sys.exit()\n print \"Will get trades from \", start, \"to\", end\n\n \"\"\" read the output file and adjust the start date, if it exists\n \"\"\"\n try:\n with open(outfile_name, \"r\") as in_file:\n goxdata = in_file.readlines() \n saved_start=get_unixtime(goxdata[0].split(\",\")[0], input_dateformat)\n saved_end=get_unixtime(goxdata[len(goxdata)-1].split(\",\")[0], input_dateformat)\n\n print \"File found, with start date:\", saved_start, \"and end date\", saved_end\n if start < saved_end:\n print \"Adjusted start time from \", start, \"to \", saved_end\n start = saved_end\n except IOError:\n print \"Output file not found. Will create a new one.\"\n\n \"\"\" get data from MtGox in chunks\n \"\"\"\n try:\n currstart = start\n endreached = False\n while endreached == False:\n # populate the trades dictionary with the next batch of data\n data = fetch_data(currstart)\n print \"Fetching data\", currstart\n if (data == '[]'):\n break \n trades = [mtgox_trade(a) for a in json.loads(data)]\n currstart = trades[-1].timestamp\n\n if trades[-1].timestamp > end:\n endreached = True\n\n # place trades into the out_file before getting the next batch from MtGox \n # so that if the program gets interrupt you have saved the trades obtained so far\n with open(outfile_name, \"a\") as out_file:\n for item in trades:\n # when you request data from a timestamp gox truncates your start time to seconds and then\n # send you everything including the initial second. 
So you must filter here trades\n # of the start_time second that are already in the database.\n if item.timestamp > start and item.timestamp < end:\n out_file.write(item.trade_to_string()+\"\\n\")\n\n except urllib2.HTTPError, e:\n print \"Error:\", str(e.code), str(e.reason)\n return\n except urllib2.URLError, e:\n print \"Error:\", e\n return", "def make_time_stamp_file():\n with open(TIME_STAMP_FILE_NAME, 'w') as f:\n f.write(datetime.datetime.now().strftime('%m/%d/%Y %I:%M%p'))", "def create_checkfile(artist_list):\n\n print(\"Creating checkfile...\")\n\n with open(\"checkfile.txt\", \"w\") as checkfile:\n\n for artist in artist_list:\n print(artist.name)\n for album in artist.albums:\n print(\"\\t\", album.name, album.year)\n for song in album.tracks:\n print(\"\\t\\t\", song.title)\n print(f\"{artist.name}\\t{album.name}\\t{album.year}\\t{song.title}\", file=checkfile)\n\n print(\"Checkfile created.\")\n print()\n print(\"=\" * 40)\n print()", "def save_output(output_list):\n wb = Workbook()\n ws1 = wb.active\n ws1.title = 'Sheet1'\n # Create title for columns\n columns_titles = ['Name', 'Condition description', 'birad[0]','birad[1]','birad[2]','birad[3]','birad[4]','birad[5]','birad[6]','Relevant modalities',\n 'Relevant findings', 'Unique findings','Additional info',\n 'Parameters', 50, 30, 10, 5, 1,'Pathogenomonic', 'Negative',\n 'Ignore', 'Associated conditions', 'Differential diagnosis', 'Notes']\n ws1.append(columns_titles)\n # Create list for output file\n for ol in output_list:\n for o in ol:\n cr_list = create_list(o)\n ws1.append(cr_list)\n wb.save(filename=OUTPUT_FILE)", "def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)", "def write_to_file_y(path):\n path1 = path + \"/y_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y%sz0ke%s.mac\" %(dy*y + y_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y%sz0ke%s.root\"\\n' %(dy*y + y_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n 
f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 %s 0\\n\" % (dy*y + y_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)" ]
[ "0.5606279", "0.54776007", "0.5325414", "0.527905", "0.5232085", "0.5220749", "0.5214294", "0.5194305", "0.5157583", "0.5149266", "0.51426595", "0.51125914", "0.50968117", "0.50955224", "0.50598377", "0.50564367", "0.5056409", "0.5055913", "0.50520384", "0.5035325", "0.5033409", "0.5027646", "0.50266147", "0.5026436", "0.5022758", "0.5014538", "0.49741006", "0.49613667", "0.49586824", "0.49513194" ]
0.7020351
0
Function that downloads a whole video when no interval is supplied. Downloaded to the same place where yt_vids is saved to (from save_link_time func)
def download_whole(no_interval):
    print(os.getcwd())
    SAVE_PATH = 'tmp'
    ydl_opts = {"nocheckcertificate": True, "noplaylist": True,
                'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        for video in range(len(no_interval)):
            try:
                ydl.download([no_interval[video]])
            # A tuple is needed to catch either exception; `except A or B` only catches A
            except (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError):
                print(f"Couldn't download {no_interval[video]}")
                continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def download_videos(blink, save_dir=\"/media\"):\n blink.download_videos(save_dir, since=get_date())", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n \"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during post 
processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + \"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n 
supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)", "async def download_video(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"What I am Supposed to find? Give link\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, video_opts, url)\n if ytdl_down is None:\n return\n f = pathlib.Path(f\"{ytdl_data['title']}.mp4\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n await codevent.edit(\n f\"`Preparing to upload video:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n ul = io.open(f, \"rb\")\n c_time = time.time()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await event.delete()", "def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. 
Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data", "def download(dltype, num):\n # This function needs refactoring!\n # pylint: disable=R0912\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\" and dltype in (\"da\", \"dv\"):\n plid = g.ytpls[int(num) - 1][\"link\"]\n down_plist(dltype, plid)\n return\n\n elif g.browse_mode == \"ytpl\":\n g.message = \"Use da or dv to specify audio / video playlist download\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n elif g.browse_mode != \"normal\":\n g.message = \"Download must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n writestatus(\"Fetching video info...\")\n song = (g.model.songs[int(num) - 1])\n best = dltype.startswith(\"dv\") or dltype.startswith(\"da\")\n\n if not best:\n\n try:\n # user prompt for download stream\n url, ext, url_au, ext_au = prompt_dl(song)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download aborted!\" + c.w\n g.content = generate_songlist_display()\n return\n\n if not url or ext_au == \"abort\":\n # abort on invalid stream selection\n g.content = generate_songlist_display()\n g.message = \"%sNo download selected / invalid input%s\" % (c.y, c.w)\n return\n\n else:\n # download user selected stream(s)\n filename = _make_fname(song, ext)\n args = (song, filename, url)\n\n if url_au and ext_au:\n # downloading video and audio stream for muxing\n audio = False\n filename_au = _make_fname(song, ext_au)\n args_au = (song, filename_au, url_au)\n\n else:\n audio = ext in (\"m4a\", \"ogg\")\n\n kwargs = dict(audio=audio)\n\n elif best:\n # set updownload without prompt\n url_au = None\n av = \"audio\" if dltype.startswith(\"da\") else \"video\"\n audio = av == \"audio\"\n filename = _make_fname(song, None, av=av)\n args = (song, filename)\n kwargs = dict(url=None, audio=audio)\n\n try:\n # perform download(s)\n dl_filenames = [args[1]]\n f = _download(*args, **kwargs)\n if f:\n g.message = \"Saved to \" + c.g + f + c.w\n\n if url_au:\n dl_filenames += [args_au[1]]\n _download(*args_au, allow_transcode=False, **kwargs)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download halted!\" + c.w\n\n try:\n for downloaded in dl_filenames:\n os.remove(downloaded)\n\n except IOError:\n pass\n\n if url_au:\n # multiplex\n mux_cmd = \"APP -i VIDEO -i AUDIO -c copy OUTPUT\".split()\n mux_cmd = \"%s -i %s -i %s -c copy %s\"\n mux_cmd = [g.muxapp, \"-i\", args[1], \"-i\", args_au[1], \"-c\",\n \"copy\", args[1][:-3] + \"mp4\"]\n\n try:\n subprocess.call(mux_cmd)\n g.message = \"Saved to :\" + c.g + mux_cmd[7] + c.w\n os.remove(args[1])\n os.remove(args_au[1])\n\n except KeyboardInterrupt:\n g.message = \"Audio/Video multiplex aborted!\"\n\n g.content = generate_songlist_display()", "def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2", "def download_vid(vid_link, quality_num=None):\r\n if quality_num is not None:\r\n # if quality_num provided\r\n try:\r\n os.system(\"youtube-dl -f \"+str(quality_num)+\" \\'\"+str(vid_link)+\"\\'\")\r\n except Exception:\r\n print(Exception)\r\n else:\r\n # by default the best quality is downloaded\r\n try:\r\n os.system(\"youtube-dl \"+str(vid_link))\r\n except Exception:\r\n 
print(Exception)", "def download_wrapper(youtube_id, output_dir):\n # we do this to align with names in annotations\n output_filename = os.path.join(output_dir, youtube_id + '.mp4')\n if os.path.exists(output_filename):\n status = tuple([youtube_id, True, 'Exists'])\n return status\n\n downloaded, log = download(youtube_id, output_filename)\n status = tuple([youtube_id, downloaded, log])\n return status", "def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):\n \n raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id)\n video_info = parse.parse_qs(raw_video_info)\n \n if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']):\n title = parse.unquote_plus(video_info['title'][0])\n stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',')\n \n else:\n # Parse video page when video_info is not usable.\n video_page = get_content('http://www.youtube.com/watch?v=%s' % id)\n ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\\s*=\\s*([^\\n]+);'))\n \n title = ytplayer_config['args']['title']\n stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')\n \n streams = {\n parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream)\n for stream in stream_list\n }\n \n for codec in yt_codecs:\n itag = str(codec['itag'])\n if itag in streams:\n download_stream = streams[itag]\n break\n \n url = download_stream['url'][0]\n if 'sig' in download_stream:\n sig = download_stream['sig'][0]\n else:\n sig = decrypt_signature(download_stream['s'][0])\n url = '%s&signature=%s' % (url, sig)\n \n type, ext, size = url_info(url)\n \n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([url], title, ext, size, output_dir, merge = merge)", "def download_video(video_stream):\n global file_size\n file_size = size_in_mb(video_stream.filesize)\n home_dir = os.environ['HOME']\n path = f'{home_dir}/Downloads/Video'\n print('-'*60)\n print(f'Filename:\\t{video_stream.title}')\n print(f'Location:\\t{path}')\n print(f'Size:\\t\\t{file_size} MB\\n')\n\n filename = video_stream.title + '_video.mp4'\n filename = filename.replace('/', ' ')\n filename = filename.replace('\\\\', ' ')\n\n if os.path.exists(os.path.join(path, filename)):\n print(\"The file has been already downloaded.\")\n sys.exit()\n \n video_stream.download(path, filename)", "def download_video(url, fn):\n start_time = time.time()\n\n # Sorry: This is terrible code, but I'm kind of throwing it\n # together as I discover more about it.\n print ' Downloading {0} to {1}'.format(url, fn)\n\n resp = requests.get(url)\n if resp.status_code != 200:\n print ' GAH! MY EYES! {0} kicked up {1}'.format(url, resp.status_code)\n return\n\n rss_url_m = re.search(r'\"(/rss/flash/\\d+)\"', resp.content)\n rss_url = 'http://blip.tv' + rss_url_m.group(0).strip('\"')\n resp = requests.get(rss_url)\n\n rss_content = resp.content\n\n for ending in POSSIBLE_ENDINGS:\n regex = r'\"http://blip.tv[^\"]+?' 
+ ending + '\"'\n\n download_m = re.search(regex, rss_content)\n if not download_m:\n print ' No {0} url found'.format(ending)\n continue\n\n download_url = download_m.group(0).strip('\"')\n print ' Attempting to download {0}'.format(download_url)\n\n try:\n resp = requests.get(download_url, stream=True)\n print ' Downloading {0}'.format(download_url)\n if resp.status_code == 200:\n total_length = int(resp.headers['content-length'])\n\n if os.path.exists(fn + ending) and file_size(fn + ending) == total_length:\n print ' Already downloaded.'\n return\n\n with open(fn + ending, 'w') as fp:\n total_downloaded = 0\n\n tic_chunk = total_downloaded\n tic = time.time()\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n fp.write(chunk)\n fp.flush()\n tic_chunk += len(chunk)\n total_downloaded += len(chunk)\n\n if time.time() - tic > 1:\n with TERM.location(x=0):\n line = ' {0} {1}kbps'.format(\n format_downloaded(total_downloaded, total_length),\n int(tic_chunk / (time.time() - tic) / 1000))\n sys.stdout.write(line + TERM.clear_eol)\n sys.stdout.flush()\n tic_chunk = 0\n tic = time.time()\n print ''\n\n print ' Done! {0} {1}mb {2}'.format(\n fn + ending,\n int(total_length / 1000000.0),\n format_duration(time.time() - start_time))\n return\n\n else:\n print ' HTTP{0}! GAH! SPUTTER!'.format(resp.status_code)\n\n except requests.exceptions.ConnectionError as exc:\n print ' CONNECTIONERROR! GAH! SPUTTER! {0}'.format(exc)\n\n print ' SO MANY FAILURES!'\n raise NoDownloadMeNoLikeyException()", "def download_video(video_url, output_path, output_name=\"\", default_type=\"mp4\", verbose=False):\n try:\n if \".\" not in output_name:\n output_name = f\"{output_name}.{default_type}\"\n output_path = os.path.join(output_path, output_name)\n api_response = core.get_request_with_retries(video_url)\n core_utils.print_if_verbose('Processing...', verbose)\n f = open(output_path, 'wb')\n for chunk in api_response.iter_content(chunk_size=255):\n # filter out keep-alive new chunks\n if chunk:\n f.write(chunk)\n core_utils.print_if_verbose(f'The video has been exported here: {output_path}', verbose)\n f.close()\n except Exception as exception_msg:\n print(f\"The video could not be downloaded due to the following error: {exception_msg}\")\n return", "def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success = 
self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}", "def download_clip(row, label_to_dir, trim, trimmed_label_to_dir, count):\n\n label = row['label']\n filename = row['youtube_id']\n time_start = row['time_start']\n time_end = row['time_end']\n\n # if trim, save full video to tmp folder\n output_path = label_to_dir['tmp'] if trim else label_to_dir[label]\n\n ydl_opts = {\n 'format': 'bestvideo[ext=mp4][filesize <? 50M]',\n }\n \n # Don't download if the video has already been trimmed\n has_trim = False\n if trim:\n start = str(time_start)\n end = str(time_end - time_start)\n output_filename = os.path.join(trimmed_label_to_dir[label],\n filename + '_{}_{}'.format(start, end) + VIDEO_EXTENSION)\n\n has_trim = os.path.exists(output_filename)\n\n # Don't download if already exists\n if not os.path.exists(os.path.join(output_path, filename + VIDEO_EXTENSION)) and not has_trim:\n print('Start downloading: ', filename) \n ydl_opts['outtmpl'] = os.path.join(output_path, '%(id)s.%(ext)s')\n \n try:\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([URL_BASE + filename])\n except YoutubeDLError as e:\n print('Download failed for ' + filename)\n log.warning(filename)\n return False\n\n print('Finish downloading: ', filename)\n else:\n print('Already downloaded: ', filename)\n\n if trim:\n # Take video from tmp folder and put trimmed to final destination folder\n # better write full path to video\n\n\n input_filename = os.path.join(output_path, filename + VIDEO_EXTENSION)\n\n if has_trim:\n print('Already trimmed: ', filename)\n else:\n print('Start trimming: ', filename)\n # Construct command to trim the videos (ffmpeg required).\n command = 'ffmpeg -i \"{input_filename}\" ' \\\n '-ss {time_start} ' \\\n '-t {time_end} ' \\\n '-c:v libx264 -c:a copy -threads 1 -y -nostdin ' \\\n '\"{output_filename}\"'.format(\n input_filename=input_filename,\n time_start=start,\n time_end=end,\n output_filename=output_filename\n )\n try:\n subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print('Error while trimming: ', filename)\n log.warning(filename)\n return False\n print('Finish trimming: ', filename)\n\n print('Processed %i out of %i' % (count + 1, TOTAL_VIDEOS))", "def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try again. 
\" + str(e))", "def download_video(self, url):\n yt = YouTube(url)\n yt_filtered = yt.streams.filter(progressive=True, file_extension=\"mp4\")\n yt_resolutions = yt_filtered.order_by(\"resolution\")\n\n # Downloads the first video that fits the description\n video = yt_resolutions.desc().first()\n video.download()\n\n # Returns the filename\n return video.default_filename", "def download_ostrich_video(download_to_path):\n urlretrieve(REMOTE_OSTRICH_VID_PATH, download_to_path)", "def download_interval(interval_list):\n start = ['start', 'begin', 'beginning', 'head', 'first']\n end = ['slut', 'end', 'tail', 'finish',\n 'finito', 'fin', 'done', 'finished']\n\n # Iterate over the list\n for link in range(len(interval_list)):\n try:\n video = pafy.new(interval_list[link][0], ydl_opts={\n 'nocheckcertificate': True, \"noplaylist\": True})\n # Only downloads the video if the video hasn't been downloaded before\n if not os.path.exists(os.path.join(\"tmp\", f\"{video.title}.mp4\")):\n video_s = video.getbestvideo()\n # TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list\n video_a = video.getbestaudio()\n\n # Checks if the end point is a string\n if interval_list[link][1][1].lower() in end:\n # Where is the stream, where should we start, how long should it run\n mp4_vid = ffmpeg.input(\n video_s.url, ss=interval_list[link][1][0], t=video.duration)\n mp4_aud = ffmpeg.input(\n video_a.url, ss=interval_list[link][1][0], t=video.duration)\n else:\n # Where is the stream, where should we start, how long should it run\n mp4_vid = ffmpeg.input(\n video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])\n mp4_aud = ffmpeg.input(\n video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])\n\n # Do the processing\n try:\n (\n ffmpeg\n .concat(\n # Specify what you want from the streams (v for video and a for audio)\n mp4_vid['v'],\n mp4_aud['a'],\n # One video stream and one audio stream\n v=1,\n a=1\n )\n # Output is title of video with mp4 ending\n .output(os.path.join(\"tmp\", f'{video.title}.mp4'))\n .run()\n )\n except TypeError as e:\n print(f\"An error occurred e 0: {e}\")\n except ffmpeg._run.Error as e:\n print(f\"An error occurred e 1: {e}\")\n except Exception as e:\n print(f\"I couldn't download {interval_list[link]} due to: {e}\")", "def download_skateline_video(download_to_path=None):\n urlretrieve(REMOTE_SKATELINE_VID_PATH, download_to_path)", "def youtube_download(url, output_dir='.', merge=True, info_only=False):\n \n id = match1(url, r'youtu.be/([^/]+)') or parse_query_param(url, 'v')\n assert id\n \n youtube_download_by_id(id, title=None, output_dir=output_dir, merge=merge, info_only=info_only)", "def download(video, save_dir, vid):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n print(\"--> downloading {}\".format(video.title))\n\n best = video.getbest(preftype=\"mp4\")\n filename = best.download(\n filepath=os.path.join(save_dir,\n \"{}.{}\".format(vid, best.extension)))\n print(\"--> saved to {}\".format(filename))\n\n return os.path.join(save_dir, \"{}.{}\".format(vid, best.extension))", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for 
item in video_instance.video.open():\n assert item", "def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path", "def download(video_identifier,\n output_filename,\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n\n if not os.path.exists(output_filename):\n command = [\n 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',\n '-f', 'mp4', '-o',\n '\"%s\"' % output_filename,\n '\"%s\"' % (url_base + video_identifier)\n ]\n command = ' '.join(command)\n print(command)\n attempts = 0\n while True:\n try:\n subprocess.check_output(\n command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n attempts += 1\n if attempts == num_attempts:\n return status, 'Fail'\n else:\n break\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status, 'Downloaded'", "def download_truncated_ostrich_video(download_to_path=None):\n urlretrieve(REMOTE_TRUNCATED_OSTRICH_VID_PATH, download_to_path)", "def download_video(self, file_path, video_url, video_creation_time):\r\n logger.debug(\"Downloading video created at \" + _format_timestamp_iso(self.tz, video_creation_time) + \" from \"\r\n + video_url + \" to \" + file_path)\r\n failed = False\r\n try:\r\n self._download_with_api(file_path, video_url)\r\n except Exception as e:\r\n logger.debug(\"Video download failed using TikTokApi: \" + str(e))\r\n failed = True\r\n if not os.path.isfile(file_path):\r\n failed = True\r\n logger.debug(\"No file was created by TikTokApi at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n failed = True\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed TikTokApi download at \" + file_path)\r\n except Exception as ee:\r\n logger.error(\"Unable to delete malformed TikTokApi download at \" + str(ee))\r\n if failed:\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n try:\r\n logger.debug(\"Falling back to YouTube-dl\")\r\n self.fallback_counter += 1\r\n self._download_with_ytdl(file_path, video_url)\r\n if not os.path.isfile(file_path):\r\n raise AssertionError(\"No file was created by YouTube-dl at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed YouTube-dl download at \" + file_path)\r\n except Exception as ee:\r\n raise AssertionError(\"Malformed file was created at \" + file_path +\r\n \" and could not be removed: \" + str(ee))\r\n raise AssertionError(\"Malformed file was created at \" + file_path + \" and was removed\")\r\n failed = False\r\n except youtube_dl.utils.DownloadError as ee:\r\n logger.error(\"YouTube-dl DownloadError: \" + str(ee))\r\n self.ytdl_downloaderror_counter += 1\r\n failed = True\r\n except Exception as ee:\r\n 
logger.error(\"Video download failed with YouTube-dl: \" + str(ee))\r\n self.other_error_counter += 1\r\n failed = True\r\n if not failed:\r\n try:\r\n os.utime(file_path, (video_creation_time, video_creation_time))\r\n except Exception as e:\r\n logger.debug(\"Unable to set utime of \" + str(video_creation_time) + \" on file \" + file_path +\r\n \", Error: \" + str(e))\r\n return True\r\n return False", "def download_inward_video_url(self, download_inward_video_url):\n\n self._download_inward_video_url = download_inward_video_url" ]
[ "0.73994005", "0.72689897", "0.7202489", "0.7171504", "0.7060031", "0.6903402", "0.6774965", "0.67710614", "0.67318517", "0.6675659", "0.66156524", "0.66117424", "0.6610987", "0.6610455", "0.6599049", "0.6573128", "0.6570862", "0.6523697", "0.651439", "0.65080476", "0.6456765", "0.6438936", "0.6420223", "0.63628674", "0.6347987", "0.6316836", "0.630469", "0.6295695", "0.6261652", "0.62441987" ]
0.74180853
0
Function to download videos in specified intervals. Takes a list (interval_list) and a path as inputs
def download_interval(interval_list):
    start = ['start', 'begin', 'beginning', 'head', 'first']
    end = ['slut', 'end', 'tail', 'finish',
           'finito', 'fin', 'done', 'finished']

    # Iterate over the list
    for link in range(len(interval_list)):
        try:
            video = pafy.new(interval_list[link][0], ydl_opts={
                'nocheckcertificate': True, "noplaylist": True})
            # Only download the video if it hasn't been downloaded before
            if not os.path.exists(os.path.join("tmp", f"{video.title}.mp4")):
                video_s = video.getbestvideo()
                # TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list
                video_a = video.getbestaudio()

                # Check whether the end point is a keyword meaning "run to the end of the video"
                if isinstance(interval_list[link][1][1], str) and interval_list[link][1][1].lower() in end:
                    # Which stream, where it should start, how long it should run
                    mp4_vid = ffmpeg.input(
                        video_s.url, ss=interval_list[link][1][0], t=video.duration)
                    mp4_aud = ffmpeg.input(
                        video_a.url, ss=interval_list[link][1][0], t=video.duration)
                else:
                    # Which stream, where it should start, how long it should run
                    mp4_vid = ffmpeg.input(
                        video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])
                    mp4_aud = ffmpeg.input(
                        video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])

                # Do the processing
                try:
                    (
                        ffmpeg
                        .concat(
                            # Specify what you want from the streams (v for video and a for audio)
                            mp4_vid['v'],
                            mp4_aud['a'],
                            # One video stream and one audio stream
                            v=1,
                            a=1
                        )
                        # Output is the title of the video with an mp4 ending
                        .output(os.path.join("tmp", f'{video.title}.mp4'))
                        .run()
                    )
                except TypeError as e:
                    print(f"An error occurred e 0: {e}")
                except ffmpeg.Error as e:
                    print(f"An error occurred e 1: {e}")
        except Exception as e:
            print(f"I couldn't download {interval_list[link]} due to: {e}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n try:\n ydl.download([no_interval[video]])\n except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError:\n print(f\"Couldn't download {no_interval[video]}\")\n continue", "def download_videos(blink, save_dir=\"/media\"):\n blink.download_videos(save_dir, since=get_date())", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "def download(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n filename = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n filename.append(video[\"file\"])\n if filename:\n for name in filename:\n downloadvideo(name)\n else:\n safeprint(\"No video matching the given query was found.\")", "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path", "def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n 
lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2", "def download_videos(data, category):\n # file_ids = get_existing_file_ids()\n\n # Sorry: This is gross.\n directory = os.path.abspath('./' + slugify(category))\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n print 'Saving files to {0}'.format(directory)\n\n start_time = time.time()\n failed_videos = []\n\n for line in data[category]:\n print ''\n print 'Working on {0} - {1}'.format(line[0], line[2])\n\n # if line[0] in file_ids:\n # print ' Skipping -- already got it'\n # continue\n\n fn = '{0}_{1}'.format(line[0], slugify(line[2]))\n try:\n download_video(line[3], os.path.join(directory, fn))\n except NoDownloadMeNoLikeyException:\n failed_videos.append(line)\n\n print ''\n if failed_videos:\n print 'FAILED VIDEOS:'\n for fail in failed_videos:\n print ' ' + '\\t'.join(fail)\n print ''\n\n print 'Total videos: {0}'.format(len(data[category]))\n print 'Total time: {0}'.format(format_duration(time.time() - start_time))\n return 0", "def download_videos(self, path, since=None, camera=\"all\", stop=10, debug=False):\n if since is None:\n since_epochs = self.last_refresh\n else:\n parsed_datetime = parse(since, fuzzy=True)\n since_epochs = parsed_datetime.timestamp()\n\n formatted_date = get_time(time_to_convert=since_epochs)\n _LOGGER.info(\"Retrieving videos since %s\", formatted_date)\n\n if not isinstance(camera, list):\n camera = [camera]\n\n for page in range(1, stop):\n response = api.request_videos(self, time=since_epochs, page=page)\n _LOGGER.debug(\"Processing page %s\", page)\n try:\n result = response[\"media\"]\n if not result:\n raise IndexError\n except (KeyError, IndexError):\n _LOGGER.info(\"No videos found on page %s. Exiting.\", page)\n break\n\n self._parse_downloaded_items(result, camera, path, debug)", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_video(url, fn):\n start_time = time.time()\n\n # Sorry: This is terrible code, but I'm kind of throwing it\n # together as I discover more about it.\n print ' Downloading {0} to {1}'.format(url, fn)\n\n resp = requests.get(url)\n if resp.status_code != 200:\n print ' GAH! MY EYES! {0} kicked up {1}'.format(url, resp.status_code)\n return\n\n rss_url_m = re.search(r'\"(/rss/flash/\\d+)\"', resp.content)\n rss_url = 'http://blip.tv' + rss_url_m.group(0).strip('\"')\n resp = requests.get(rss_url)\n\n rss_content = resp.content\n\n for ending in POSSIBLE_ENDINGS:\n regex = r'\"http://blip.tv[^\"]+?' 
+ ending + '\"'\n\n download_m = re.search(regex, rss_content)\n if not download_m:\n print ' No {0} url found'.format(ending)\n continue\n\n download_url = download_m.group(0).strip('\"')\n print ' Attempting to download {0}'.format(download_url)\n\n try:\n resp = requests.get(download_url, stream=True)\n print ' Downloading {0}'.format(download_url)\n if resp.status_code == 200:\n total_length = int(resp.headers['content-length'])\n\n if os.path.exists(fn + ending) and file_size(fn + ending) == total_length:\n print ' Already downloaded.'\n return\n\n with open(fn + ending, 'w') as fp:\n total_downloaded = 0\n\n tic_chunk = total_downloaded\n tic = time.time()\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n fp.write(chunk)\n fp.flush()\n tic_chunk += len(chunk)\n total_downloaded += len(chunk)\n\n if time.time() - tic > 1:\n with TERM.location(x=0):\n line = ' {0} {1}kbps'.format(\n format_downloaded(total_downloaded, total_length),\n int(tic_chunk / (time.time() - tic) / 1000))\n sys.stdout.write(line + TERM.clear_eol)\n sys.stdout.flush()\n tic_chunk = 0\n tic = time.time()\n print ''\n\n print ' Done! {0} {1}mb {2}'.format(\n fn + ending,\n int(total_length / 1000000.0),\n format_duration(time.time() - start_time))\n return\n\n else:\n print ' HTTP{0}! GAH! SPUTTER!'.format(resp.status_code)\n\n except requests.exceptions.ConnectionError as exc:\n print ' CONNECTIONERROR! GAH! SPUTTER! {0}'.format(exc)\n\n print ' SO MANY FAILURES!'\n raise NoDownloadMeNoLikeyException()", "def create_original_videos(frames, video_path, interval):\n ncols = int(math.sqrt(len(frames)))\n fig, ax = plt.subplots(\n ncols=ncols,\n nrows=ncols,\n figsize=(5 * ncols, 5 * ncols),\n tight_layout=True,\n )\n max_len = max([len(f) for f in frames])\n\n def init():\n ims = []\n k = 0\n for k in range(ncols):\n for j in range(ncols):\n ims.append(ax[j][k].imshow(unnorm(frames[k * ncols + j][0])))\n ax[j][k].grid(False)\n ax[j][k].set_xticks([])\n ax[j][k].set_yticks([])\n return ims\n\n ims = init()\n\n def update(i):\n print(\"{}/{}\".format(i, max_len))\n for k in range(ncols):\n for j in range(ncols):\n idx = (\n i\n if i < len(frames[k * ncols + j])\n else len(frames[k * ncols + j]) - 1\n )\n ims[k * ncols + j].set_data(unnorm(frames[k * ncols + j][idx]))\n plt.tight_layout()\n return ims\n\n anim = FuncAnimation(\n fig, update, frames=np.arange(max_len), interval=interval, blit=False,\n )\n anim.save(video_path, dpi=80)", "def browse_video_list(video_list: List[str], browser: webdriver.Firefox):\n if not video_list or len(video_list) == 0:\n log.warning(\"Empty video list or null list.\")\n log.debug(\"Video list to be watched: {}\".format(video_list))\n unknown_failure_counter: int = 0\n js_execution_failure_counter: int = 0\n success_count: int = 0\n total_video_length: int = len(video_list)\n log.info(\"Start watching list of videos, total size: {}\".format(total_video_length))\n for i, video in enumerate(video_list):\n current_success: bool = False\n retry_count: int = 0\n video: str = FireFoxSimpleAutoBrowsing.__trim_youtube_link(video)\n log.info(\"Index: {}, watching: {}\".format(i + 1, video))\n current_video_screenshot_dir: str = os.path.join(\n FireFoxSimpleAutoBrowsing.SCREENSHOT_PATH,\n video.replace('/', '-').replace(':', '-').replace('.', '-'))\n if not os.path.exists(current_video_screenshot_dir):\n os.makedirs(current_video_screenshot_dir)\n log.debug(\"\\tCreate dir {} for screenshot\".format(current_video_screenshot_dir))\n log.info(\"\\tScreenshot for video {} saved at 
{}.\"\n .format(video, current_video_screenshot_dir))\n while not current_success and retry_count < FireFoxSimpleAutoBrowsing.RETRY_CHANCES:\n try:\n refreshed: bool = False\n browser.get(video)\n if settings.fast:\n FireFoxSimpleAutoBrowsing.__play_at_fastest_speed(browser)\n current_status: str = FireFoxSimpleAutoBrowsing.__get_player_status(browser)\n video_time: float = FireFoxSimpleAutoBrowsing.__get_video_elapsed_time(browser)\n while current_status != \"ended\" and video_time < settings.watch_time:\n previous_video_time: float = video_time\n previous_status: str = current_status\n log.debug(\"\\tStatus: {}, video time: {:7.2f}s\"\n .format(current_status, video_time))\n time.sleep(FireFoxSimpleAutoBrowsing.STATUS_CHECK_INTERVAL)\n screenshot_file_name: str = \\\n os.path.join(current_video_screenshot_dir, str(time.ctime()) + \".png\")\n browser.save_screenshot(screenshot_file_name)\n video_time = FireFoxSimpleAutoBrowsing.__get_video_elapsed_time(browser)\n current_status = FireFoxSimpleAutoBrowsing.__get_player_status(browser)\n if abs(previous_video_time - video_time) < 10e-3 \\\n and previous_status == current_status \\\n and current_status in ['unstarted', 'paused', 'buffering']:\n if not refreshed:\n browser.refresh()\n log.warning(\"\\tVideo playing frozen. \"\n \"Try resolve by browser refreshed..\")\n refreshed = True\n else:\n raise YouTubePlayerException(\n \"\\tYouTube video play frozen, video stopped time: {}, \"\n \"current play status: {}.\".format(video_time, current_status),\n video)\n current_success = True\n success_count += 1\n except JavascriptException:\n js_execution_failure_counter += 1\n log.warning(\"JavascriptException during watching video {}, \"\n \"most like caused by unavailable video. \"\n \"Traceback is provided for analysis. Jump to next video (if any).\"\n .format(video), exc_info=True)\n break\n except InvalidSessionIdException as e:\n log.critical(\"Lost connection to Firefox browser, or Firefox browser crashed.\"\n \" Probably due to previous viewing too many videos. There is no \"\n \"point to continue current test. 
Quit the whole experiment.\")\n raise\n except Exception as e:\n retry_count += 1\n log.error(\"Exception during watching video {}, caused by: {},\"\n \" retry count: {}\".format(video, e, retry_count),\n exc_info=True)\n if retry_count >= FireFoxSimpleAutoBrowsing.RETRY_CHANCES:\n unknown_failure_counter += 1\n log.error(\"Video {} failed after retry {} times.\"\n .format(video, retry_count))\n log.info(\"Finished watching list, total {}, succeed count: {}, \"\n \"unknown failed count: {}, possible video unavailable count: {}\".\n format(total_video_length, success_count, unknown_failure_counter,\n js_execution_failure_counter))\n return", "def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next result\"", "def extract_frames_from_directory(count, source, destination):\n all_videos = os.listdir(source)\n print(all_videos)\n\n for video in all_videos:\n video_file = source + video # Retrieve a video from the OverHeadPress\n cap = cv2.VideoCapture(video_file) # capturing the video from the given path\n dim = (224, 224)\n\n while cap.isOpened():\n frame_id = cap.get(1) # current frame number\n ret, frame = cap.read()\n if not ret:\n break\n\n # We are capturing at 28 frames per second. \n # If we want to capture every 0.2 seconds we will take every 5 frames\n if frame_id % 8 == 0:\n filename =\"frame%d.jpg\" % count\n count+=1\n resized = cv2.resize(frame, dim)\n cv2.imwrite(destination + filename, resized)\n\n cap.release()\n print (\"Finished processing: \" + video + \". 
Ended at video: \" + str(count))", "def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success = self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}", "def stitch_video_temporal(intervals, out_path,\n align_args={'align_mode': None},\n dilation_args={'dilation': None}, \n speed=None,\n im_size=(640, 480)):\n # parse args\n align_mode = align_args['align_mode']\n if not align_mode is None:\n out_duration = align_args['out_duration']\n if align_mode == 'phrase':\n segments = align_args['segments']\n dilation = dilation_args['dilation']\n if not dilation is None:\n person_intrvlcol = dilation_args['person_intrvlcol']\n \n intervals = intervals.copy()\n def download_video_clip(interval):\n video_id, sfid, efid, duration = interval\n video = Video.objects.filter(id=video_id)[0]\n start, end = 1. * sfid / video.fps, 1. * efid / video.fps\n video_path = video.download(segment=(start, end))\n if align_mode == 'phrase' and duration != 0:\n video_path = speed_change(video_path, speed=(end-start) / duration)\n return video_path\n \n # deal with phrase duration\n if not align_mode is None:\n in_duration = sum([i[-1] for i in intervals])\n if align_mode == 'phrase':\n num_syllables = [count_syllables(phrase) for phrase in segments]\n duration_per_syl = 1. 
* out_duration / sum(num_syllables)\n for idx, i in enumerate(intervals):\n intervals[idx] = (i[0], i[1], i[2], num_syllables[idx] * duration_per_syl)\n # download clips for each phrase \n clip_paths = par_for(download_video_clip, intervals)\n \n # add silent clip for break \n if not dilation is None and dilation < 0.1:\n dilation = None\n if not dilation is None:\n if dilation > 1:\n break_path = create_silent_clip(person_intrvlcol, dilation)\n else: \n video_id, sfid, efid = intervals[-1][:3]\n video = Video.objects.filter(id=video_id)[0]\n interval = (video_id, efid, efid + int(dilation*video.fps), 0)\n break_path = download_video_clip(interval)\n break_path = mute_video(break_path)\n \n # concat phrase clips\n if len(intervals) > 1:\n lyric_path = concat_videos(clip_paths, im_size=im_size)\n else:\n lyric_path = clip_paths[0]\n \n # global change lyric speed \n if align_mode == 'sentence' or not speed is None: \n if speed is None:\n speed = in_duration / out_duration\n# print(in_duration, out_duration, speed)\n lyric_path = speed_change(lyric_path, speed)\n \n # concat the dilation clip\n if not dilation is None:\n concat_videos([lyric_path, break_path], out_path, im_size=im_size)\n else:\n shutil.move(lyric_path, out_path)", "def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)", "def download(urls, dest_folder):\n pass", "def segment_intervals(filename, basedir, start_sec=0, end_sec=0, seconds_between_frame_grabs=10):\n # Get video id\n video_id = re.findall('v\\d+', filename)[0]\n\n # Open file handle\n vid = imageio.get_reader(filename, 'ffmpeg')\n\n # Get metadata\n meta = vid.get_meta_data()\n fps = int(meta['fps'])\n nframes = meta['nframes']\n frames_to_get = np.arange(start_sec, end_sec, seconds_between_frame_grabs) * fps\n\n # Check frames\n for i in frames_to_get:\n try:\n img = vid.get_data(i)\n except:\n raise\n\n # Downlsample full image\n downsampled = downsample_image(img)\n\n # Player status\n h, w, c = img.shape\n factor = 0.23\n y1 = int(h * 0.04)\n x1 = int(w * (0.5 - factor))\n y2 = int(h * 0.0645)\n x2 = int(w * (0.5 + factor))\n pl_status = crop_rect(img, x1, y1, x2, y2)\n\n # Block out center of player status\n h, w, c = pl_status.shape\n factor = .12\n x1 = int(w * (0.5 - factor))\n x2 = int(w * (0.5 + factor))\n pl_status = blackout_middle(pl_status, x1, x2)\n\n # Write full frame and header frame\n imageio.imwrite(os.path.join(basedir, 'full_{}_s{}.png'.format(video_id, int(i/fps))), img)\n imageio.imwrite(os.path.join(basedir, 'pl_status_{}_s{}.png'.format(video_id, int(i/fps))), pl_status)\n imageio.imwrite(os.path.join(basedir, 'downsampled_{}_s{}.png'.format(video_id, int(i/fps))), downsampled)\n\n vid.close()\n\n return True", "def test_video(video_path):\n def get_clips(frames_list, sequence_size=11):\n clips = []\n clip = []\n cnt = 0\n sz = len(frames_list)\n for i in range(0, sz-sequence_size):\n for idx in range(i, i+sequence_size):\n clip.append(frames_list[idx])\n clips.append(clip)\n clip = []\n return clips\n \n all_frames = []\n # loop over all the images in the folder (0.png,1.png,..,199.png)\n dir_path = listdir(video_path)\n dir_path = sorted(dir_path, key=lambda name: int(name[0:-4]))\n for i in dir_path:\n if str(join(video_path, i))[-3:] == \"png\":\n img_path = join(video_path, i)\n all_frames.append(img_path)\n clips = get_clips(frames_list=all_frames, sequence_size=11)\n# clips = 
get_clips_by_stride(stride=1, frames_list=all_frames, sequence_size=11)\n return clips", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def download_list(urls, outdir=None, workdir=None, threads=3):\n pool = ThreadPool(threads)\n download_lambda = lambda x: download(x, outfile=outdir, workdir=workdir)\n pool.map(download_lambda, urls)", "def play_video(path):\r\n #logger.info(\"######: {}, log: {}########\".format('rk8', path))\r\n #original=https://api.hotstar.com/h/v1/play?contentId=1000238814\r\n #path=\"https://api.hotstar.com/h/v2/play/in/contents/1000238814\"\r\n # Create a playable item with a path to play.\r\n data = make_request(path)\r\n #logger.info(\"######: {}, log: {}########\".format('rk3', path))\r\n if not data:\r\n return\r\n\r\n def get_subtitle(url):\r\n #\r\n # https://hses.akamaized.net/videos/hotstarint/hostages/1260003409/1558430241469/\r\n # 265b9dab22d4e9a033e6df6f89639f17/master.m3u8?hdnea=st=1560107863~exp=1560111463~acl=\r\n # /*~hmac=2f6fb393159ed5fa1b12bbf12e954eb377cfa0fc852d4ff5eb24446233237620\r\n #\r\n # https://hses.akamaized.net/videos/hotstarint/hostages/1260003409/1558430241469/\r\n # 5d0f83c3ccbf4501cf952bdfc8c0d785/subtitle/lang_en/sub-0.vtt\r\n #\r\n _url = urlparse(url)\r\n values = _url._asdict()\r\n values['query'] = ''\r\n values['path'] = '{}/subtitle/lang_en/sub-0.vtt'.format(\"/\".join(values['path'].split('/')[:-1]))\r\n\r\n subtitle_url = ParseResult(**values).geturl()\r\n # subtitle_file = kodiutils.download_url_content_to_temp(subtitle_url, '{}-{}.srt'.format(\r\n # Zee5Plugin.safe_string(item['title']),\r\n # subtitle_lang,\r\n # ))\r\n\r\n return subtitle_url\r\n\r\n #logger.info(\"######: {}, log: {}########\".format('rk6', data))\r\n #item = data['body']['results']['item']\r\n item=data['body']['results']['playBackSets'][0]\r\n path = item['playbackUrl']\r\n licenseURL = item.get('licenseUrl')\r\n subtitle = get_subtitle(path)\r\n\r\n logger.info('Playing video URL: {}, licenseURL: {}, subtitle: {}'.format(path, licenseURL, subtitle))\r\n\r\n play_item = xbmcgui.ListItem(path=path)\r\n if licenseURL:\r\n play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')\r\n play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')\r\n play_item.setMimeType('application/dash+xml')\r\n play_item.setContentLookup(False)\r\n\r\n play_item.setSubtitles([get_subtitle(path)])\r\n\r\n # Pass the item to the Kodi player.\r\n xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)", "def api_get_videos_duration(list_videos, api_service):\n if list_videos:\n durations = []\n dates = []\n\n if isinstance(list_videos[0], tuple):\n chunks50 = divide_chunks([video[0] for video in list_videos], 50)\n\n else:\n chunks50 = divide_chunks([video for video in list_videos], 50)\n\n # print(chunks50)\n\n for chunk in chunks50:\n request = api_service.videos().list(id=\",\".join(chunk),\n part=['contentDetails', 'snippet'],\n maxResults=50).execute()\n\n # print(request)\n\n durations += [parse_duration(element[\"contentDetails\"][\"duration\"]) 
for element in request[\"items\"]]\n dates += [element[\"snippet\"][\"publishedAt\"] for element in request[\"items\"]]\n\n # print(len(list_videos), len(durations), len(dates))\n\n id_and_duration = sorted([(video_id, durations[idx], datetime.strptime(dates[idx], \"%Y-%m-%dT%H:%M:%S%z\"))\n for idx, video_id in enumerate(list_videos)], key=lambda tup: tup[2])\n\n return id_and_duration\n\n return []", "def video_slicer(filepath, save_path, start_indexes, end_indexes):\n cap = cv2.VideoCapture(filepath)\n\n frame_width = int(cap.get(3))\n frame_height = int(cap.get(4))\n out = cv2.VideoWriter(save_path,cv2.VideoWriter_fourcc('M','J','P','G'), 30, (frame_width, frame_height))\n\n if len(start_indexes) != len(end_indexes):\n return(\"Time stamps must be the same length\")\n\n basket_counter = 0\n frame_counter = 0\n while True:\n ret, frame = cap.read()\n if not ret:\n # if no more frames then break\n break\n\n if basket_counter >= len(end_indexes):\n # if we've gotten all our slices then break\n break\n\n if frame_counter >= start_indexes[basket_counter] and frame_counter <= end_indexes[basket_counter]:\n # if we are in a basket then save that frame\n out.write(frame)\n elif frame_counter > end_indexes[basket_counter]:\n # if we just left a basket then increment our bascket counter\n basket_counter += 1\n\n frame_counter += 1\n\n\n cap.release()\n out.release()\n print(\"{} clips were sliced\".format(str(basket_counter)))\n print(\"File Saved to {}\".format(save_path))", "def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]", "def kegg_download_manager_synchronous(list_of_ids, wait=1):\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n num_urls = len(urls)\n print(f\"Total urls to download: {num_urls}. Progress will be shown below.\")\n results = []\n for url in tqdm(urls):\n results.append(download_synchronous(url))\n time.sleep(wait)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]", "def download(server):\n for i in range(10):\n start_time = time.time()\n logging.debug('Start downloading: %d' % i)\n os.system(\"scp %s:18DOWNLOAD downloads/\" % server)\n end_time = time.time()\n logging.debug('End downloading...')\n logging.debug('Time taken by downloader: %s' % (end_time - start_time))" ]
[ "0.6634631", "0.6432747", "0.63896453", "0.6305958", "0.60365444", "0.59619087", "0.5885181", "0.5815725", "0.56896555", "0.5573953", "0.5560245", "0.5557694", "0.55472976", "0.5520894", "0.5493748", "0.5459092", "0.54333603", "0.54173565", "0.54105514", "0.54061747", "0.54023707", "0.5356837", "0.5349421", "0.5303259", "0.52836955", "0.52768236", "0.5275138", "0.52495116", "0.5224876", "0.5218306" ]
0.79695
0
Function to download pictures from the input sequence
def download_pics(pics_links): for link in range(len(pics_links)): r = requests.get(pics_links[link][0]) with open(os.path.join("tmp", f"{link}.jpg"), "wb") as dl: dl.write(r.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths", "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. 
\n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break", "def downloadMinio(url_list,list_d):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n name = \"-\".join(parser_arguments().classes)\n name = name.lower()\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n\n\n if r.status_code == 200:\n r.raw.decode_content = True\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n metadata = list_d[i]\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n path = os.getcwd()+'/'+filename # image path\n minioClient.fput_object(name,filename,path,'image/jpg',metadata)\n os.remove(filename)\n print(filename,'have been successfuly uploaded')\n print('Done!')", "def download_images(keyword, limit = 1):\n #creating list of arguments\n arguments = {\"keywords\": keyword ,\n \"limit\": limit , \n \"print_urls\": False,\n \"output_directory\": OUT_DIR} \n\n # Pass the arguments to above function and download images\n paths = response.download(arguments)", "async def dl_image(url, filename):\n\ttry:\n\t\twith aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url) as resp:\n\t\t\t\ttest = await resp.read()\n\t\t\t\twith open('data/tmp/'+filename.lower(), \"wb\") as f:\n\t\t\t\t\tf.write(test)\n\t\t\t\treturn 0\n\texcept Exception as e:\n\t\tprint('[!ERROR!] 
in Get image')\n\t\tprint(e)\n\t\treturn -1", "def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)", "def download(word, n_images=100):\n\n # Fields for pixbay from https://pixabay.com/api/docs/#api_search_images\n\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n\n for i in range(5):\n fields = {\n \"key\": _(s.__secret__, egg_open()),\n \"q\": word,\n \"image_type\": \"photo\",\n \"safesearch\": \"true\",\n \"per_page\": max(3, min(200, n_images + i))\n }\n\n debug_log(f\"fields for request:\\n{ {key: fields[key] for key in fields.keys() if key != 'key'} }\")\n\n r = http.request(method='GET',\n url='https://pixabay.com/api/',\n fields=fields)\n\n debug_log(f\"Response data: {r.data}\")\n\n if \"ERROR\" in str(r.data, 'utf-8'):\n continue\n else:\n break\n\n try:\n data = json.loads(r.data.decode('utf-8'))\n except json.decoder.JSONDecodeError as e:\n warnings.warn(\"Cannot download '{word}'. Bad response: {response}\".format(\n word=word,\n response=str(r.data, 'utf-8')\n ))\n return False\n\n image_urls = [item[\"largeImageURL\"] for item in data[\"hits\"]]\n image_ids = [item[\"id\"] for item in data[\"hits\"]]\n\n\n debug_log(f\"Image urls: {image_urls}\")\n debug_log(f\"Len Image urls: {len(image_urls)}\")\n\n save_dir = os.path.join(s.__STEP_1_CACHE_DIR__, word)\n os.makedirs(save_dir, exist_ok=True)\n\n if len(image_urls) < n_images:\n warnings.warn(\"Not enough images for {word}. Only {len_image_urls} instead of {n_images}.\".format(\n word=word,\n len_image_urls=len(image_urls),\n n_images=n_images\n ))\n open(os.path.join(save_dir, \"SATURATED\"), 'w').close()\n open(os.path.join(save_dir, \"DO_NOT_DELETE\"), 'w').close()\n\n image_paths = [get_unique_save_path_name(save_dir,\n im_id,\n im_url.split('.')[-1]) # Get the right image extension\n for im_id, im_url in zip(image_ids, image_urls)]\n\n debug_log(f\"Image paths: {image_paths}\")\n\n for i, im_url, im_path in zip(range(len(image_urls)), image_urls, image_paths):\n debug_log(f\"Downloading '{word}' image [{i+1}/{len(image_urls)}]: {im_url}\")\n save_file(im_url, im_path, http)\n debug_log(f\"Done! 
Saved as {im_path}\")\n\n return True", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)", "def getimgs():", "def get_image(self, index):\r\n \r\n # Get request to get all the links for all exercises\r\n image = requests.get(API.url_image, headers = self.headers).json()\r\n filename = download(image[index]['image'])", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def download_card_images(self, card_names, lang=\"en\"):\n for card_name in card_names:\n print(\"Dowloading card imgs for \\'\" + card_name + \"\\' (\" + lang + \")\")\n output_file_name = card_name + \".jpg\"\n output_file_path = IoManager.CARD_IMAGES_PATH_EN + \"/\" + output_file_name if lang == \"en\" else IoManager.CARD_IMAGES_PATH_FR + \"/\" + output_file_name\n output_file_path = output_file_path.replace('//', '__')\n en_url, fr_url = self.get_card_urls(card_name)\n url = en_url if lang == \"en\" else fr_url\n # Open the url image, set stream to True, this will return the stream content.\n resp = requests.get(url, stream=True)\n # Open a local file with wb ( write binary ) permission.\n local_file = open(output_file_path, 'wb')\n # Set decode_content value to True, otherwise the downloaded image file's size will be zero.\n resp.raw.decode_content = True\n # Copy the response stream raw data to local image file.\n shutil.copyfileobj(resp.raw, local_file)\n # Remove the image url response object.\n del resp", "def regular_download(self) -> NoReturn:\n\n if not path.isdir(self.name):\n mkdir(self.name)\n\n for chapter in self.chapters.keys():\n\n chapter_folder = f\"{self.name}/{chapter}/\"\n curr_chapter = self.chapters[chapter]\n base_url = f\"{curr_chapter['server']}{curr_chapter['hash']}/\"\n\n if not path.isdir(chapter_folder):\n mkdir(chapter_folder)\n\n for image in curr_chapter[\"images\"]:\n\n image_url = f\"{base_url}{image}\"\n image_file = f\"{chapter_folder}{image}\"\n response = requests.get(image_url, headers={\"Connection\":\"close\"})\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n else:\n print(f\"Error downloading chapter: {curr_chapter['num']} Image: {image}\")", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def download_images(urlList):\n fileNumber = 1;\n fileName = \"\"\n\n # urlList[0] is just titles, so we start at 1\n for url in urlList[1:]:\n 
sys.stdout.write(\"\\rFile number %i of %i \" % (fileNumber+1, len(urlList)))\n\n sys.stdout.flush()\n\n try:\n fileName = str(fileNumber) + \".png\"\n # Download the file from `url` and save it locally under `fileName`:\n # I append png to the end of the file to \"make it\" png, but there's definitely a better way\n with urllib.request.urlopen(url) as response, open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n sys.stdout.flush()\n print(\"\\r %s is not a downloadable image. Skipping to next url...\" % url)\n \n fileNumber += 1;\n\n sys.stdout.write(\"\\r\\nDone!\")\n sys.stdout.flush()\n sys.stdout.write(\"\\r\\n\")", "def download_images(img_urls, dest_dir):\n # +++your code here+++\n (errcode, statusmsg) = check_create_dir(dest_dir)\n if errcode:\n print statusmsg\n sys.exit(errcode)\n else: print statusmsg\n # retrieve images and generate html code for files\n html_str = '<html>\\n<body>\\n' # opening html file tags\n i = 0\n for img in img_urls:\n img_filename = 'img' + str(i)\n full_filepath = os.path.join(dest_dir, img_filename) \n print 'Retrievieng ' + img + ' to ' + full_filepath + ' file..'\n urllib.urlretrieve(img, full_filepath)\n html_str += '<img src=\\\"' + img_filename + '\\\">'\n i += 1\n html_str += '\\n</html>\\n</body>' # closing html file tags\n # create html file\n html_filename = os.path.join(dest_dir, 'index.html')\n f = open(html_filename, 'w')\n f.write(html_str) \n f.close()\n print 'File ' + html_filename + ' was created.'", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)", "def download_images(main_keyword, supplemented_keywords, download_dir): \n image_links = set()\n print('Process {0} Main keyword: {1}'.format(os.getpid(), main_keyword))\n\n # create a directory for a main keyword\n img_dir = download_dir + main_keyword + '/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n for j in range(len(supplemented_keywords)):\n print('Process {0} supplemented keyword: {1}'.format(os.getpid(), supplemented_keywords[j]))\n search_query = quote(main_keyword + ' ' + supplemented_keywords[j])\n # url = 'https://www.google.com/search?q=' + search_query + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'\n url = 'https://www.google.com/search?q=' + search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n print('Process {0} get {1} links so far'.format(os.getpid(), len(image_links)))\n time.sleep(2)\n print (\"Process {0} get totally {1} links\".format(os.getpid(), len(image_links)))\n\n print (\"Start downloading...\")\n count = 1\n for link in image_links:\n try:\n req = urllib.request.Request(link, headers = 
{\"User-Agent\": generate_user_agent()})\n response = urllib.request.urlopen(req)\n data = response.read()\n file_path = img_dir + '{0}.jpg'.format(count)\n with open(file_path,'wb') as wf:\n wf.write(data)\n print('Process {0} fininsh image {1}/{2}.jpg'.format(os.getpid(), main_keyword, count))\n count += 1\n except urllib.error.URLError as e:\n logging.error('URLError while downloading image {0}\\nreason:{1}'.format(link, e.reason))\n continue\n except urllib.error.HTTPError as e:\n logging.error('HTTPError while downloading image {0}\\nhttp code {1}, reason:{2}'.format(link, e.code, e.reason))\n continue\n except Exception as e:\n logging.error('Unexpeted error while downloading image {0}\\nerror type:{1}, args:{2}'.format(link, type(e), e.args))\n continue\n\n print(\"Finish downloading, total {0} errors\".format(len(image_links) - count))", "def download(subreddits):\r\n print(subreddits)\r\n pic_urls = get_urls.reddit_pics(subreddits, PAGES, SORTING, TIME_PERIOD)\r\n image_downloader.check_folder(FOLDER_PATH)\r\n image_downloader.download_pics(pic_urls, FOLDER_PATH)", "def download_images(pages):\n try:\n pool = Pool(conf.MAX_PROCESS)\n pool.map_async(get_image_from_page, pages)\n pool.close()\n pool.join()\n except:\n pool.close()\n pool.join()", "def download_images_png(self):\n self.show_as_waiting(True)\n self.download_images('PNG')\n self.show_as_waiting(False)", "def download_dilbert(s, u):\n with open(\"comicfile.jpg\", \"wb\") as file:\n response = s.get(u)\n file.write(response.content)", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched" ]
[ "0.66714674", "0.6665392", "0.662957", "0.65895194", "0.6587865", "0.6502355", "0.64724034", "0.64450634", "0.64316654", "0.6391709", "0.6382724", "0.6302078", "0.6292136", "0.6259905", "0.6259636", "0.6252637", "0.6219414", "0.6212664", "0.61892736", "0.61513776", "0.6140734", "0.6130097", "0.61186403", "0.61059463", "0.60937715", "0.60852975", "0.6082696", "0.60770524", "0.6076696", "0.6074698" ]
0.6981115
0
Get chain attribute for an object.
def chain_getattr(obj, attr, value=None): try: return _resolve_value(safe_chain_getattr(obj, attr)) except AttributeError: return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def safe_chain_getattr(obj, attr):\n return reduce(getattr, attr.split('.'), obj)", "def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr(target, attr)\n return target", "def deepgetattr(obj, attr):\n\t\treturn reduce(getattr, attr.split('.'), obj)", "def get_attr(obj, attr):\n return getattr(obj, attr)", "def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]", "def get_object_attribute_from_filter(obj, components):\n\n try:\n return getattr(\n obj, components[-1] if hasattr(obj, components[-1]) else components[-2]\n )\n except: return None", "def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None", "def getattribute(objeto, name: str):\r\n # Get internal dict value matching name.\r\n value = objeto.__dict__.get(name)\r\n if not value:\r\n # Raise AttributeError if attribute value not found.\r\n return None\r\n # Return attribute value.\r\n return value", "def get_nested_attr(__o: object, __name: str, *args) -> Any:\n def _getattr(__o, __name):\n return getattr(__o, __name, *args)\n return reduce(_getattr, [__o] + __name.split('.')) # type: ignore", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def __get__(self, obj, objtype=None):\n if obj is None:\n return self\n if self._isdecorator():\n if self.fdec is None:\n raise AttributeError(\"can't decorate with attribute\")\n return self.fdec(obj)\n else:\n return super().__get__(obj, objtype)\n # if self.fget is None:\n # raise AttributeError(\"unreadable attribute\")\n # return self.fget(obj)", "def get_from_object(obj, attribute):\n jsonpath_expr = parse_path(attribute)\n return_list = [i.value for i in jsonpath_expr.find(obj)]\n if return_list:\n return return_list[0]\n return None", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.chain", "def deepgetattr(obj, attr):\n for key in attr.split('.'):\n obj = getattr(obj, key)\n return obj", "def deepgetattr(obj, attr, default=None, splitter='.', do_raise=False):\n try:\n return reduce(getattr, attr.split(splitter), obj)\n except AttributeError:\n if do_raise:\n raise\n return default", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def get_attr(self, name: str):\n return self.call(name)", "def get_chain(self):\n return self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def deepgetattr(obj, attr, default=AttributeError):\n try:\n return reduce(getattr, attr.split(\".\"), obj)\n except AttributeError:\n if default is not AttributeError:\n return default\n raise", "def __getattribute__(self,name):\n try:\n return object.__getattribute__(self,name)\n except AttributeError:\n extraPO = object.__getattribute__(self,'_extraPO')\n\n if hasattr(extraPO,name):\n return getattr(extraPO,name) # HIDDEN!\n\n _attr_err_msg = object.__getattribute__(self,'_attr_err_msg')\n\n raise AttributeError(_attr_err_msg(name,[self,extraPO]))", "def _obj_getattr(obj, fqdn, start=1):\n node = obj\n for chain in fqdn.split('.')[start:]:\n if hasattr(node, chain):\n node = getattr(node, chain)\n else:\n node = None\n break\n return node", "def _getter(obj, attr):\n _get = attrgetter(attr)\n try:\n return _get(obj)\n except:\n return None", "def __getattribute__(self, name):\n # special attribute that need to go 
straight to this obj\n if name in ['pget', 'pobj', '_delegate', '_wrap', '_get', \n '__class__', '__array_finalize__', 'view', '__tr_getattr__']:\n return object.__getattribute__(self, name)\n\n try:\n return self.__tr_getattr__(name)\n except:\n pass\n\n if hasattr(self.pobj, name):\n return self._wrap(name) \n \n return object.__getattribute__(self, name)", "def get_attr(obj: Any, key: str):\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj", "def get_attr(obj: Any, key: str):\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def __getattr__(self, attr):\n return getattr(self.obj, attr)" ]
[ "0.6868703", "0.65075165", "0.6485146", "0.64567274", "0.62154573", "0.61727905", "0.61658573", "0.6000796", "0.5964722", "0.5916216", "0.5916216", "0.5876859", "0.5874207", "0.5869589", "0.5869589", "0.5850275", "0.58463216", "0.58114004", "0.5795329", "0.57904404", "0.57904404", "0.5784626", "0.5782953", "0.57762647", "0.5772085", "0.57297087", "0.57264286", "0.57264286", "0.56935185", "0.56935185" ]
0.7126414
0
trim the list to make total length no more than limit. If split is specified, a string is returned.
def trim_iterable(iterable, limit, *, split=None, prefix='', postfix=''): if split is None: sl = 0 join = False else: sl = len(split) join = True result = [] rl = 0 for element in iterable: element = prefix + element + postfix el = len(element) if len(result) > 0: el += sl rl += el if rl <= limit: result.append(element) else: break if join: result = split.join(result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim(self, input_words_list):\n def to_be_trimmed(x):\n if len(x) < 3:\n return False\n else:\n return True\n self.trimmed_words_list = list(filter(to_be_trimmed, input_words_list))\n # print('the filtered words are:')\n # for word in trimmed_words_list:\n # print(word)\n return self.trimmed_words_list", "def soar_trimlist(org_list):\n if not isinstance(org_list, list):\n return org_list\n return [element.strip() for element in org_list]", "def ltrim1 (l,proportiontocut,tail='right'):\r\n if tail == 'right':\r\n lowercut = 0\r\n uppercut = len(l) - int(proportiontocut*len(l))\r\n elif tail == 'left':\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l)\r\n return l[lowercut:uppercut]", "def trimlist(self, iplist, maxIPs = 3):\n \n if len(iplist) > 0:\n iplist = iplist[:maxIPs:]\n return iplist\n else:\n print(\"Error: No IP A-records found.\")\n return None", "def ltrimboth (l,proportiontocut):\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l) - lowercut\r\n return l[lowercut:uppercut]", "def repair_size_list(self, str_val):\n return [word for word in str_val[2:-2].split('\\', \\'')]", "def limit(st,length):\n return st[:length]", "def test_trim(self):\n s1 = 'esrdctfvubfiqisqwduonq'\n assert lws.trim(s1, 5) == 'esrdc...'\n assert lws.trim(s1, 20) == 'esrdctfvubfiqisqwduo...'\n s2 = 'asdasdasd'\n assert lws.trim(s2) == 'asdasdasd'", "def _rsplit(value, sep, maxsplit=None):\n\tstr_parts = value.split(sep)\n\tif (maxsplit is not None) and (len(str_parts) > 1):\n\t\treturn [str.join(sep, str_parts[:-maxsplit])] + str_parts[-maxsplit:]\n\treturn str_parts", "def split(self, sep=None, maxsplit=None):\n return split(self, sep, maxsplit)", "def clean_tag(elmt_with_commas, max_lenght):\r\n elmt_list = elmt_with_commas.split(\",\")\r\n elmt_list = [e.strip() for e in elmt_list if len(e) < max_lenght]\r\n return elmt_list", "def dividir(l):\n\n\ta = []\n\tfor i in range(len(l)):\n\t\ta += l[i].split(' ')\n\treturn a[:100]", "def trim_to_upper_length_limit(self) -> None:\n self.trim_utils.lang_model = self.config['language_model']\n\n dataframe_splits = np.array_split(self.data, self.n_cores)\n pool = Pool(self.n_cores)\n self.data = pd.concat(pool.map(self.trim_text_for_dataframe, dataframe_splits))\n pool.close()\n pool.join()", "def check_list(list_obj, limit):\r\n if len(list_obj) > limit:\r\n num_of_lists = int(len(list_obj) / limit) + 1\r\n sublist = []\r\n k = 0\r\n while k < num_of_lists:\r\n x = list_obj[limit*k:limit*(k+1)]\r\n sublist.append(x)\r\n k += 1\r\n\r\n return sublist\r\n\r\n return list_obj", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def split(self) -> List[String]:\n pass", "def trim_items(self, val):\n self.order_items = self.order_items[:val]", "def listsplit(value, arg):\n\n print \"listsplit:{0}\".format(arg)\n args = arg.split(\",\")\n if not len(args) == 2:\n return value\n\n i = int(args[0])\n n = int(args[1])\n\n m = len(value)\n\n base = m // n\n rem = m % n\n\n sizes = [base + 1] * rem + [base] * (n - rem)\n\n start = sum(sizes[0:i])\n end = start + sizes[i]\n\n return value[start:end]", "def truncate(x: str, limit: int) -> str:\n return \" \".join(x.split()[:limit])", "def explode(delim, val, limit = None): \n if limit != None:\n return val.split(delim, limit)\n else:\n return val.split(delim)", "def trim(self, start, end):", "def split_message(message, max_length):\n ms = []\n while len(message) > max_length:\n ms.append(message[:max_length])\n message = 
message[max_length:]\n ms.append(message)\n return ms", "def trimmedLength(self):\r\n\t\treturn (self.sourceLength - (self.trimEnd + self.trimStart))", "def splitter(self, lts, size, res=\"l\"):\n if res == \"l\":\n new_list = [lts[i:i + size] for i in range(0, len(lts), size)]\n elif res == \"s\":\n new_list = [\",\".join(lts[i:i + size])\n for i in range(0, len(lts), size)]\n\n return new_list", "def _FixLongList(self, long_list, chunk_size):\n split_list = []\n length = len(long_list)\n if length > chunk_size:\n list_size = chunk_size - 1\n pages, mod = divmod(length, list_size)\n if mod:\n pages += 1\n for page in range(pages):\n split_list.append(long_list[list_size * page:list_size * (page+1)])\n else:\n split_list.append(long_list)\n return split_list", "def _clean_list(self, items):\n itemlist = list(filter(None, items))\n if len(itemlist) < 3:\n itemlist.append(\"\")\n return itemlist\n\n return itemlist", "def __string_splitter(self, arr, string, split_length):\n if len(string) < split_length:\n arr.append(string)\n return arr\n else:\n arr.append(string[:split_length])\n return self.__string_splitter(arr, string[split_length:], split_length)", "def split(self, string, maxsplit=MAX_INT, include_separators=False):\n return self._split(\n string, maxsplit=maxsplit, include_separators=include_separators\n )", "def rsplit(self, sep=None, maxsplit=None):\n return rsplit(self, sep, maxsplit)", "def rsplit(self) -> List[String]:\n pass" ]
[ "0.61260587", "0.61242557", "0.60332996", "0.5976751", "0.59576887", "0.59357154", "0.59291357", "0.575979", "0.5731277", "0.56639487", "0.5637736", "0.5636854", "0.5605196", "0.5510604", "0.5455997", "0.54495543", "0.5449324", "0.544276", "0.54360414", "0.5429126", "0.54289603", "0.5424462", "0.5389596", "0.53759766", "0.5372024", "0.53625154", "0.53508806", "0.53421175", "0.53420347", "0.53279" ]
0.66803694
0
It raises an error when trying to decrypt a non-encrypted value.
def test_decrypt_format(self): with pytest.raises(EncryptionError): decrypt('message')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'", "def _decrypt(self, value, **options):\n\n raise CoreNotImplementedError()", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def test_decryption_private_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(given))", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_decrypt_encrypted(self):\n encrypted = encrypt('message')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'message'", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def decrypt(self, data):", "def decrypt(self, value):\n return self._execute(value, task='decrypt')", "def decrypt_message(encrypted_message):", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def test_decrypt_encoding(self):\n encrypted = encrypt('méssåge')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'méssåge'", "def test_real_world_malware(self):\n key = bytes.fromhex('0394d550fb286dda')\n data = bytes.fromhex('6bdb2c294e7e031c38e4adecaa8dc755')\n unit = self.load(key, raw=True)\n self.assertEqual(unit.decrypt(data).hex(), '4c5a495001b30026968e700017f7ec05')", "def test_kms_decrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_decrypt(self.mock_kms, self.secret)", "def test_decoding_non_str_fails(self):\n self.assertRaises(DecodingError, base62.to_decimal, sys.maxsize)", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decryptor(byte_string: bytes, IV: bytes, key: bytes) -> bool:\n decrypted_string = AES_CBC_decrypt(byte_string, IV, key)\n print(len(decrypted_string), decrypted_string)\n if not check_ascii_compliance(decrypted_string):\n raise Exception(decrypted_string)", "def testKeyMismatch(self):\n encrypted_data = self.encrypt_wrapper.read(1024 * 1024 
* 100)\n\n wrong_key = crypto.RSAPrivateKey().GenerateKey()\n decrypt_wrapper = uploads.DecryptStream(\n readers_private_key=self.readers_private_key,\n writers_public_key=wrong_key.GetPublicKey(),\n outfd=self.outfd)\n\n # We should know after very few bytes that the key is wrong. The\n # first encrypted chunk is the serialized signature which is 518\n # bytes in the test. Adding crypto headers gives a chunk size of\n # 570. After 600 bytes we should definitely bail out.\n with self.assertRaises(crypto.VerificationError):\n decrypt_wrapper.write(encrypted_data[:600])", "def decrypt(encryption_value):\n Common.logger.info(\"Decryption job started started\")\n key = Common.get_config_value(\"jenkins_key\")\n fkey = Fernet(key.encode())\n decrypt_value = fkey.decrypt(encryption_value.encode())\n return decrypt_value", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res", "def decrypt(cipherBackup: str, password: str) -> str:\n\n try:\n return comp.desencriptar(cipherBackup, password)\n\n except:\n return '1'", "def decrypt(self, full_encrypted_value, **options):\n\n try:\n self._validate_format(full_encrypted_value, **options)\n encrypted_part = self._get_encrypted_part(self._prepare_input(full_encrypted_value),\n **options)\n\n return self._decrypt(encrypted_part, **options)\n except Exception as error:\n raise DecryptionError(error) from error", "def test_sealedbox_enc_dec(self):\n # Encrypt with pk\n encrypted_data = nacl.sealedbox_encrypt(data=self.unencrypted_data, pk=self.pk)\n\n # Decrypt with sk\n decrypted_data = nacl.sealedbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)" ]
[ "0.7508658", "0.73156184", "0.7232054", "0.6764023", "0.66845644", "0.657834", "0.6562093", "0.65003294", "0.6460941", "0.6459456", "0.6367024", "0.63215554", "0.6274202", "0.62535083", "0.62495065", "0.62276834", "0.6201334", "0.6190526", "0.618059", "0.61519164", "0.61452615", "0.6136156", "0.6099021", "0.606818", "0.6026457", "0.5960535", "0.59432477", "0.59120756", "0.5902871", "0.5895447" ]
0.76258427
0
It accepts a custom decryption key.
def test_decrypt_key(self): key = b'0' * 32 encrypted = encrypt('message', key=key) assert decrypt(encrypted, key=key) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def test_decryption_private_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(given))", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def setup_key_decrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(\"Please enter the key that was used to encrypt your message.--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key.\")\t\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def _decrypt(self, key, value):\n payload = EncryptedPayload.from_json(value)\n if not payload:\n return value\n\n decrypted = self._kms_crypto.decrypt_payload(payload)\n if not decrypted:\n return value\n\n key_prefix = '%s=' % key\n if not decrypted.startswith(key_prefix):\n return value\n\n return decrypted[len(key_prefix):]", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def decrypt(self, key, dir):\n self.encrypt(key, dir)", "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def decrypt(self, key, device, private_key):\n device_key = base64.b64decode(self.keys[device.id.hex])\n\n master_key = private_key_decrypt(private_key, device_key)\n\n if master_key is None:\n return\n\n return fernet_decrypt(self.values[key], master_key, self.salt)", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def decrypt(self, data):", "def populate_ios_decryption_key(self, base64_key: bytes):\r\n # case: the base64 encoding can come in garbled, but still pass through decode_base64 as an\r\n # un-unicodeable 256 byte(?!) binary blob, but it base64 decodes into a 16 byte key. The fix\r\n # is to decode_base64 -> encode_base64, which magically creates the correct base64 blob. 
wtf\r\n try:\r\n base64_str: str = base64_key.decode()\r\n except UnicodeDecodeError:\r\n # this error case makes no sense\r\n base64_str: str = encode_base64(decode_base64(base64_key)).decode()\r\n \r\n try:\r\n IOSDecryptionKey.objects.create(\r\n file_name=self.file_name,\r\n base64_encryption_key=base64_str,\r\n participant=self.participant,\r\n )\r\n return\r\n except ValidationError as e:\r\n print(f\"ios key creation FAILED for '{self.file_name}'\")\r\n # don't fail on other validation errors\r\n if \"already exists\" not in str(e):\r\n raise\r\n \r\n extant_key: IOSDecryptionKey = IOSDecryptionKey.objects.get(file_name=self.file_name)\r\n # assert both keys are identical.\r\n if extant_key.base64_encryption_key != base64_str:\r\n print(\"ios key creation unknown error 2\")\r\n raise IosDecryptionKeyDuplicateError(\r\n f\"Two files, same name, two keys: '{extant_key.file_name}': \"\r\n f\"extant key: '{extant_key.base64_encryption_key}', '\"\r\n f\"new key: '{base64_str}'\"\r\n )", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(self, input, key, iv) :\n pass", "def decrypt(key, cipher, use_custom=False):\n result = logic(key, cipher, use_custom)\n return array.array(\"B\", result)", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError" ]
[ "0.6593434", "0.6539305", "0.6523048", "0.65177274", "0.65156114", "0.6480838", "0.645648", "0.63991714", "0.63842076", "0.63480246", "0.634022", "0.6332533", "0.6332092", "0.631312", "0.62985706", "0.6246969", "0.6217749", "0.6211802", "0.6208556", "0.62013805", "0.6200206", "0.61917937", "0.61856425", "0.61591977", "0.61469704", "0.6130895", "0.61282086", "0.6117785", "0.6110495", "0.61065316" ]
0.69840544
0
It reencrypts an encrypted message using a new key.
def test_rekey(self): old_key = b'0' * 32 new_key = b'1' * 32 old_encrypted = encrypt('message', key=old_key) new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key) assert decrypt(new_encrypted, key=new_key) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def encrypt(key, plaintext, cipher):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n p = plaintext.read()\n c = rsa.encrypt(p, k)\n\n cipher.write(c)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except OverflowError:\n click.echo(\"ERROR: Message is to long for encryption with the given key.\")", "def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)", "def update(self, plaintext):\n return self._encryptor.update(plaintext)", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def encrypt(self, message, key):\n message = self.pkcs7_pad(message)\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(AES.block_size))\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n return iv + cipher.encrypt(message)", "def encrypt_message(message: str, key: int = 17):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n shifted_alphabet = alphabet[key:] + alphabet[:key]\n encrypted_message = \"\"\n\n for i in message.upper():\n\n # Use encryption for letters only, keep the rest\n if i in alphabet:\n\n # Find index in alphabet\n for idx, l in enumerate(alphabet):\n if i == l:\n\n # Add letter in shifted alphabet\n # with this index to the message\n encrypted_message += shifted_alphabet[idx]\n else:\n encrypted_message += i\n\n return encrypted_message", "def encryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n encoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code+key\n new = chr(change)\n string += new\n key += key_increment\n \n encoded = ''.join(string)\n return ('Encoded Message:\\t' + encoded)", "def rsa_encrypt(message, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.encrypt)", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def caesarShiftStringOps(message, key, encrypt=True):\n message = message.lower().replace(' ', '')\n alphabet = string.ascii_lowercase\n\n if not encrypt:\n key = -key\n\n shiftedAlphabet = alphabet[key:] + alphabet[:key]\n return message.translate(str.maketrans(alphabet, shiftedAlphabet))", "def func(plaintext, key):\n ciphertext = xor(plaintext, key)\n return ciphertext", "def decrypt_message(encrypted_message):", "def encrypt(message, key):\n\tnumericRepresentation = []\n\tfor c in message:\n\t\tnumericRepresentation.append(ord(c) - 65)\n\n\tcipher = \"\"\n\tfor x in numericRepresentation:\n\t\tcipher += chr((x + key) % 26 + 65)\n\n\treturn cipher", "def caesarShift(message, key, encrypt=True):\n message = message.lower().replace(' ', '')\n alphabet = string.ascii_lowercase\n newMessage = \"\"\n\n # Change shift direction depending on encrypting or decrypting\n if not encrypt:\n key = -key\n\n # Loop through the message\n for char in message:\n index = alphabet.find(char)\n newMessage += alphabet[(index + key) % 26]\n\n return newMessage", "def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def exchange_key(connection, pub_key):\r\n\r\n if main.diffe_key_exchange is False:\r\n # Get the server's public key\r\n server_pub_key_bytes = connection.recv(1024)\r\n\r\n # Send public key\r\n connection.sendall(rsa.PublicKey.save_pkcs1(pub_key))\r\n\r\n else:\r\n # Rounds of bit-shifting and XOR\r\n rounds = 64\r\n\r\n while True:\r\n\r\n # Generate 4096-bit keys (RFC 3526 Group 16)\r\n client_diffe_key = pyDHE.new(16)\r\n shared_secret = client_diffe_key.negotiate(connection)\r\n\r\n # Encrypt\r\n encrypted = int(binascii.hexlify(rsa.PublicKey.save_pkcs1(pub_key)).decode(), 16)\r\n for x in range(0, rounds):\r\n encrypted = encrypted ^ (shared_secret ** rounds)\r\n encrypted = encrypted << rounds\r\n encrypted = int(str(encrypted)[::-1])\r\n\r\n # Decrypt\r\n decrypted = encrypted\r\n decrypted = int(str(decrypted)[::-1])\r\n for x in range(rounds, 0, -1):\r\n decrypted = decrypted >> rounds\r\n decrypted = decrypted ^ (shared_secret ** rounds)\r\n\r\n # Check if able to decrypt\r\n try:\r\n binascii.unhexlify(hex(decrypted)[2:]).decode()\r\n client_success = True\r\n\r\n # Generate new keys upon failure and try again\r\n except UnicodeDecodeError:\r\n client_success = False\r\n pass\r\n except binascii.Error:\r\n client_success = False\r\n pass\r\n\r\n # Notify client about encryption status\r\n server_success = connection.recv(1024)\r\n if client_success is False:\r\n connection.send(b'DHE')\r\n else:\r\n connection.send(b'CONTINUE')\r\n\r\n # Get encryption status from client\r\n if client_success is False or server_success == b'DHE':\r\n pass\r\n elif server_success == 
b'CONTINUE':\r\n break\r\n\r\n # Hold encrypted server key\r\n server_encrypted = b''\r\n\r\n # Receive encrypted key from the server\r\n while True:\r\n data = connection.recv(8192)\r\n if data == b'ENDED':\r\n break\r\n elif data[-5:] == b'ENDED':\r\n server_encrypted += data[:-5]\r\n break\r\n server_encrypted += data\r\n\r\n # Send the encrypted key to the server\r\n connection.sendall(bytes(hex(encrypted).encode()))\r\n connection.send(b'ENDED')\r\n\r\n # Decrypt the client's public key\r\n decrypted = int(server_encrypted, 16)\r\n decrypted = int(str(int(decrypted))[::-1])\r\n for x in range(rounds, 0, -1):\r\n decrypted = decrypted >> rounds\r\n decrypted = decrypted ^ (shared_secret ** rounds)\r\n\r\n server_pub_key_bytes = binascii.unhexlify(hex(decrypted)[2:]).decode()\r\n\r\n server_pub_key = rsa.PublicKey.load_pkcs1(server_pub_key_bytes)\r\n # Determine max message size\r\n max_message_size = common.byte_size(server_pub_key.n) - 11\r\n\r\n # Return crypto key information\r\n return server_pub_key, server_pub_key_bytes, max_message_size", "def encryptCaesar(message, key):\r\n newMessage = \"\"\r\n for char in message:\r\n if char in alphaLower:\r\n newLetterPosition = (alphaLower.index(char) + key) % 26\r\n newMessage += alphaLower[newLetterPosition]\r\n elif char in alphaUpper:\r\n newLetterPosition = (alphaUpper.index(char) + key) % 26\r\n newMessage += alphaUpper[newLetterPosition]\r\n else: newMessage += char\r\n return newMessage", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def encrypt(message, key):\r\n # --- YOU CODE STARTS HERE\r\n if type(message) != str or type(key) != int:\r\n return 'Invalid input'\r\n alpha_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n alpha_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n new_st = ''\r\n\r\n for x in message:\r\n if (alpha_lower.count(x) != 0) or (alpha_upper.count(x) != 0):\r\n if alpha_lower.count(x) != 0 and alpha_lower.index(x) + key < 26:\r\n new_st += alpha_lower[alpha_lower.index(x) + key]\r\n\r\n if alpha_upper.count(x) != 0 and alpha_upper.index(x) + key < 26:\r\n new_st += alpha_upper[alpha_upper.index(x) + key]\r\n\r\n if alpha_upper.count(x)!= 0 and alpha_upper.index(x) + key >= 26:\r\n new_st += alpha_upper[alpha_upper.index(x) + key - 26]\r\n\r\n if alpha_lower.count(x) != 0 and alpha_lower.index(x) + key >= 26:\r\n new_st += alpha_lower[alpha_lower.index(x) + key - 26]\r\n else:\r\n new_st += x\r\n\r\n return new_st\r\n\r\n # --- CODE ENDS HERE\r", "def encrypt(self, message, key=None):\n if key is None:\n key = self.public_key\n encrypter = RSA.importKey(key)\n return encrypter.encrypt(message, 2048)", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def encrypt(message, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public key with encrypt\")\n\n return chopstring(message, pub_key.e, pub_key.n, encrypt_int)", "def encrypt(cls, plaintext, aad, key, iv):", "def encrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.encrypt(message.encode())" ]
[ "0.7298857", "0.7298796", "0.71268785", "0.66500294", "0.6324676", "0.62568414", "0.6190534", "0.6128457", "0.61111814", "0.6098401", "0.60541093", "0.59321904", "0.5931756", "0.5884302", "0.587639", "0.58716655", "0.5832455", "0.58202076", "0.5819175", "0.5817855", "0.58072025", "0.5782812", "0.5781394", "0.57752514", "0.5763925", "0.57580334", "0.5740395", "0.5723917", "0.56983775", "0.5694362" ]
0.7975824
0
It raises an error when trying to rekey a nonencrypted value.
def test_rekey_non_encrypted(self): with pytest.raises(EncryptionError): rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def testKeyMismatch(self):\n encrypted_data = self.encrypt_wrapper.read(1024 * 1024 * 100)\n\n wrong_key = crypto.RSAPrivateKey().GenerateKey()\n decrypt_wrapper = uploads.DecryptStream(\n readers_private_key=self.readers_private_key,\n writers_public_key=wrong_key.GetPublicKey(),\n outfd=self.outfd)\n\n # We should know after very few bytes that the key is wrong. The\n # first encrypted chunk is the serialized signature which is 518\n # bytes in the test. Adding crypto headers gives a chunk size of\n # 570. 
After 600 bytes we should definitely bail out.\n with self.assertRaises(crypto.VerificationError):\n decrypt_wrapper.write(encrypted_data[:600])", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def test_kms_re_encrypt_fails_client_error(self):\n self.mock_kms.re_encrypt.side_effect = self.client_error\n b64_secret = base64.b64encode(self.secret)\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, b64_secret)", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def gpgkey_error(self, repo_id, error):\n self.send(repo_id, 'gpgkey_error', error)", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def test_set_key():\n\n assert symmetric.set_key(\"test\") == \"test\"", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key = \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def test_encrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = None\n\n with pytest.raises(EncryptionError):\n encrypt('message')", "def test_decrypt_format(self):\n with pytest.raises(EncryptionError):\n decrypt('message')", "def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key", "def _check_key(self, key):\n raise NotImplementedError", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result", "def test_set_redis_no_val():\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')", "def test_wrong_course_key(self):\n def mock_from_string(*args, **kwargs):\n \"\"\"Mocked function to always raise an exception\"\"\"\n raise InvalidKeyError('foo', 'bar')\n\n self.client.login(username=self.student.username, password=self.password)\n with patch('opaque_keys.edx.keys.CourseKey.from_string', side_effect=mock_from_string):\n resp = 
self.client.get(self.get_url(self.student.username))\n\n assert resp.status_code == status.HTTP_404_NOT_FOUND\n assert 'error_code' in resp.data\n assert resp.data['error_code'] == 'invalid_course_key'", "def test_encrypt_no_key_id(self):\n encryptor = self.test_init()\n encryptor.key_id = None\n\n with self.assertRaises(IceItException):\n encryptor.encrypt('blah', 'blah-again')", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))" ]
[ "0.7787494", "0.7165129", "0.7140279", "0.71330994", "0.69257516", "0.6686824", "0.6646899", "0.6348278", "0.6192973", "0.6057841", "0.60305434", "0.6001477", "0.5970502", "0.58401686", "0.5836722", "0.5806258", "0.5804228", "0.57949257", "0.57405263", "0.57361877", "0.5714895", "0.5691836", "0.5686841", "0.56837195", "0.5633617", "0.5630989", "0.5623438", "0.560158", "0.5600826", "0.5600826" ]
0.82164097
0
It raises an error when given an invalid new key.
def test_rekey_key_format(self): old_key = b'0' * 32 encrypted = encrypt('message', key=old_key) with pytest.raises(EncryptionError): rekey(encrypted, old_key=old_key, new_key=b'1' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _newKey(self, key):\n pass", "def _check_key(self, key):\n raise NotImplementedError", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)", "def isValidKey(key):\n return True", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_duplicate_key_identifier(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n new_key = Key(\n key_identifier=list(bundle.keys)[0].key_identifier,\n key_tag=4711,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=base64.b64encode(b\"test key\"),\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(ValueError):\n validate_signatures(bundle)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def keyError():\n d = {}\n d['cat']", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def 
validate_key(key):\r\n try:\r\n secret.Secret(key)\r\n except secret.Secret.InvalidSecret as e:\r\n raise KeyIsInvalid(e.message)", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_neg_exists_key_invalid_data(self, key, ex_code, ex_msg):\n with pytest.raises(e.ParamError):\n key, _ = self.as_connection.exists(key)", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def _add_error(self, key, message):\n if key not in self._error_key_list:\n self._error_key_list.append(key)\n self.add_error(key, str(message))", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def add_existing_key_fail(self, data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")" ]
[ "0.71822304", "0.70056105", "0.69519156", "0.6922246", "0.6791061", "0.6753472", "0.6749198", "0.67076725", "0.66495234", "0.6584771", "0.6557083", "0.65199524", "0.65018636", "0.64907044", "0.6454335", "0.64416265", "0.6438558", "0.6431475", "0.6424363", "0.6396235", "0.63894755", "0.6379695", "0.6369072", "0.6369072", "0.6349258", "0.6336291", "0.6314593", "0.6310023", "0.6263599", "0.6214058" ]
0.7050168
1
Return True if domain is marked sensitive
def is_domain_sensitive(name): query = database.session_query(Domain) query = query.filter(and_(Domain.sensitive, Domain.name == name)) return database.find_all(query, Domain, {}).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_secure_site_enabled\")", "def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_secure_site_enabled\")", "def is_shared_with_domain(self):\n return self.has_label(SHAREDWITHDOMAIN_LABEL)", "def check_secure():\n return get_config_handler().check_secure()", "def domain_filter(self, url):\n return url_is_from_any_domain(url, self._domain)", "def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped", "def controlled(self):\n if self.crypt_private is not None and self.sign_private is not None:\n return True\n else:\n return False", "def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False", "def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1", "def is_integral_domain(self):\n return False", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def _is_domain_allowed(email):\n domains = local_config.AuthConfig().get('whitelisted_domains', default=[])\n for domain in domains:\n if utils.normalize_email(email).endswith('@%s' % domain.lower()):\n return True\n\n return False", "def allow_bare_domains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def secure(self) -> bool:\n return self.get_state(self.args[CONF_OVERALL_SECURITY_STATUS]) == \"Secure\"", "def is_active_domain(self, domain=\"www.google.com\", name_server='1.1.1.1'):\n my_resolver = dns.resolver.Resolver()\n my_resolver.nameservers = [name_server]\n my_resolver.timeout = 3\n my_resolver.lifetime = 3\n try:\n A = my_resolver.query(domain, 'A')\n for i in A.response.answer:\n for j in i.items:\n return self.is_actual_ip(str(j))\n except Exception as e:\n return None", "def option_domains_always_in_scope_enabled(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionDomainsAlwaysInScopeEnabled/')))", "def allowed(self, request):\n try:\n sdn_enabled = stx_api.sysinv.get_sdn_enabled(request)\n return sdn_enabled\n except Exception:\n return False", "def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def insecure(self) -> bool:\n return self._insecure", "def _supports_domain(cls, domain):\n return domain in (ZZ, QQ)", "def filter_ssl(request):\n if request.scheme == 'https':\n return True\n else:\n return False", "def strict_host_key_checking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"strict_host_key_checking\")", "def strict_host_key_checking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"strict_host_key_checking\")", "def strict_host_key_checking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"strict_host_key_checking\")", "def https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "def 
https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")" ]
[ "0.5945297", "0.5943389", "0.5943389", "0.58801776", "0.5860374", "0.5823208", "0.58100474", "0.5775172", "0.57475305", "0.57442623", "0.57067436", "0.56709236", "0.56709236", "0.56709236", "0.56595856", "0.56003463", "0.55976045", "0.5583748", "0.5570339", "0.5522135", "0.5515378", "0.5515378", "0.5505999", "0.5490891", "0.54883003", "0.5410126", "0.5410126", "0.5410126", "0.53990966", "0.53990966" ]
0.7324499
0
Update an existing domain
def update(domain_id, name, sensitive): domain = get(domain_id) domain.name = name domain.sensitive = sensitive database.update(domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_domain(domain_name):\n\n if request.method == \"POST\":\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Check if domain.provider object exists to make sure\n # duplicate Provider.provider_url is not created\n provider = session.query(Provider).filter(\n Provider.provider_url == domain.provider.provider_url).first()\n if not provider:\n provider = Provider(\n provider_url=request.form[\"provider-url\"].strip())\n\n domain.category.category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"].strip()).first()\n\n domain.domain_name = parse_url(request.form[\"domain-name\"].strip())\n domain.ip = request.form[\"ip-address\"].strip()\n domain.provider.provider_url = parse_url(\n provider.provider_url.strip())\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Updated {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Problem with one of the fields.\".format(\n \"<strong>\", \"</strong>\")\n flash(message, \"danger\")\n return redirect(url_for(\"edit_domain\", domain_name=domain_name))\n\n if request.form[\"submit\"] == \"Save\":\n return redirect(url_for(\"view_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n return redirect(url_for(\"edit_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Obtain list of domain names without tuple to use\n # for domain_pager()\n domain_names = [d.domain_name for d in session.query(\n Domain.domain_name).order_by(Domain.domain_name).all()]\n next_domain, previous_domain = domain_pager(domain_name, domain_names)\n\n kwargs = {\n \"domain\": domain,\n \"domain_name\": domain_name,\n \"category_names\": category_names,\n \"next_domain\": next_domain,\n \"previous_domain\": previous_domain\n }\n return render_template(\"edit_domain.html\", **kwargs)", "def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)", "def test_update_domain_only(self):\n self.test_update()", "def update_domain (cls, base, updated, log):\n # Get new domain name\n domain = cls.detect_domains(nffg=updated)\n if len(domain) == 0:\n log.error(\"No domain detected in new %s!\" % updated)\n return\n if len(domain) > 1:\n log.warning(\"Multiple domain name detected in new %s!\" % updated)\n return\n domain = domain.pop()\n log.debug(\"Update elements of domain: %s in %s...\" % (domain, base.id))\n base_infras = {i.id for i in base.infras if i.domain == domain}\n if len(base_infras) == 0:\n log.warning(\"No Node was found in the base %s! 
Use merging...\" % base)\n return cls.merge_new_domain(base=base, nffg=updated, log=log)\n # If infra nodes were removed or added, best way is to remerge domain\n else:\n # TODO - implement real update\n log.error(\"Domain update has not implemented yet!\")", "def set_domain(self, domain):\n\n self._domain = domain\n\n self.changed = True", "def post_domain_update(self, resource_id, resource_dict):\n pass", "def change_the_Domain_for_ad_domain_and_click_Save(driver, ad_domain):\n global domain\n domain = ad_domain\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Domain\"]')\n # driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain\"]').clear()\n # driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain\"]').send_keys(ad_domain)\n assert wait_on_element(driver, 7, '//button[@ix-auto=\"button__SAVE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()", "def ModifyDomain(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyDomain\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyDomainResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def domain_update(self, domain, contact_info, raw=True, **kwargs):\n endpoint = '/Domain/Update'\n\n params = {\n 'Domain' : domain\n }\n\n params.update(contact_info)\n params.update(kwargs)\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response['product'][0]['status'] == 'SUCCESS'", "def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data", "def replace_domain(address, old_domain, new_domain):\n old_domain_pattern = r'' + old_domain + '$'\n address = re.sub(old_domain_pattern, new_domain, address)\n return address", "def update(request):\n from pprint import pformat\n if 'ipv4' not in request.GET and 'ipv6' not in request.GET:\n return HttpResponse(\"Must specify one or both of ipv4/ipv6 address\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n if not u'domain' in request.GET:\n return HttpResponse(\"Must specify domain\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n\n for ipvx, record_type in ((u'ipv4', 'A'), (u'ipv6', 'AAAA')):\n if ipvx not in request.GET:\n continue\n record, created = Record.objects.get_or_create(\n name=request.GET['domain'],\n type=record_type,\n )\n record.domain_id = 1\n record.ttl = 1\n record.auth = True\n record.content = request.GET[ipvx]\n record.save()\n\n return HttpResponse(\"Saved record(s)\")", "def set_keystone_v3_domain(self, **kwargs):\n LOG_OBJ.debug(\"Creating the domain.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + \\\n str(kwargs['domain_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _domain_info = {\"domain\": {}}\n for argument in [\"name\", \"description\", \"enabled\", \"disabled\"]:\n try:\n _domain_info['domain'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_domain_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from 
Server while set the domain\")\n print (\"No response from Server while set the domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain", "def change_domain(self, new_domain):\n self.domain=new_domain\n for pm in self._maps.values():\n pm.change_domain(new_domain)", "def update_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:\n pass", "def domain(self, domain):\n self._domain = domain", "def domain(self, domain):\n self._domain = domain", "def save_domain(self):\n del_domain = 0\n save_domain = 0\n\n sending_btn = self.dlg.sender().objectName()\n if sending_btn[:-1] == \"uBtnRemoveDomain\":\n del_domain = sending_btn[-1]\n if sending_btn[:-1] == \"uBtnSaveDomain\":\n save_domain = sending_btn[-1]\n\n keys = {}\n for entry in range(1, len(self.domains) + 2):\n if int(del_domain) == entry:\n continue\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(entry)).text()\n key = getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).text().strip()\n if domain and key:\n keys[domain] = key\n self.api_key_instance.set_api_keys(keys)\n\n # remove store capability docs for the removed or add domain/key\n # if they already exits .i.e these will be reloaded\n if save_domain:\n ui_elem_num = save_domain\n else:\n ui_elem_num = del_domain\n\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(ui_elem_num)).text()\n self.local_store.del_domains_xml(domain)\n\n # load / Reload service data\n self.load_settings()\n self.dlg.uWarningSettings.hide()\n self.dlg.uLabelWarning.hide()\n if self.curr_list_wid_index is not None:\n self.dlg.uListOptions.setCurrentItem(self.curr_list_wid_index)\n else:\n self.dlg.uListOptions.setCurrentRow(0)\n\n self.dlg.uStackedWidget.setCurrentIndex(0)\n self.services_loaded = False # key change, load data again\n self.load_ui()", "def domain(self, domain):", "def domain(self, domain):\n\n self._domain = domain", "def domain(self, domain):\n\n self._domain = domain", "def add(self, newaddress):\n list = newaddress.split(\"@\")\n newdomain = list[-1]\n if not newdomain in self.__domainlist:\n self.__domainlist.append(newdomain)\n else:\n print(\"Domain is already in the database\")", "def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . 
\" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()", "def update(domain_name=None, primary_ns=None, admin_mail=None, refresh=None,\n retry=None, expire=None, default_ttl=None, patch=False, **kwargs):\n url = 'https://api.cloudns.net/dns/modify-soa.json'\n\n params = Parameters({\n 'domain-name': domain_name,\n 'primary-ns': primary_ns,\n 'admin-mail': admin_mail,\n 'refresh': {\n 'value': refresh,\n 'min_value': 1200,\n 'max_value': 43200,\n },\n 'retry': {\n 'value': retry,\n 'min_value': 180,\n 'max_value': 2419200,\n },\n 'expire': {\n 'value': expire,\n 'min_value': 1209600,\n 'max_value': 2419200,\n },\n 'default-ttl': {\n 'value': default_ttl,\n 'min_value': 60,\n 'max_value': 2419200,\n },\n })\n\n return requests.post(url, params=params.to_dict())", "def update_type_A_domain(self, domain, point_to):\n r53 = self.connections.get_route53()\n\n # Get Zone ID\n zone = r53.get_zone(self.env.domain)\n zone_id = zone.id\n\n if not zone.get_a(domain):\n sys.exit(\"\\nAbort: {} does not exists! \" \\\n \"Please create first!\".format(domain))\n\n # Commit change\n try:\n changes = ResourceRecordSets(connection=r53, hosted_zone_id=zone_id)\n change = changes.add_change(action='UPSERT', name=domain, type=\"A\")\n change.set_alias(\n alias_hosted_zone_id=zone_id,\n alias_dns_name=point_to,\n alias_evaluate_target_health=False)\n changes.commit()\n except DNSServerError:\n raise\n except Exception:\n print(\"Unexpected error: {}\".format(traceback.format_exc()))\n sys.exit(1)\n\n # Print record set\n record = zone.get_a(domain)\n print(\"\\nUpdated record set is:\\n{}\".format(record.to_print()))", "def update_instance_url(setting):\n site_obj = Site.objects.all().order_by('id').first()\n site_obj.domain = setting.value\n site_obj.save()", "def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )", "def update_domain_endpoint_options(DomainName=None, DomainEndpointOptions=None):\n pass", "def test_update_domain_with_a_record(self):\n a_record = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n self.test_update(updateRecords=a_record)" ]
[ "0.7216133", "0.6992674", "0.69599164", "0.6770144", "0.6691671", "0.6631902", "0.66250306", "0.66209924", "0.6600841", "0.65592813", "0.6530738", "0.65130335", "0.6412239", "0.63919693", "0.63866", "0.6360199", "0.6326602", "0.6326602", "0.63022065", "0.6297731", "0.62386405", "0.62386405", "0.61449534", "0.6136164", "0.6086499", "0.6071909", "0.60690707", "0.60583305", "0.60529774", "0.5997694" ]
0.77741724
0
Establish a TCP connection to the indiserver via port 7624
def connect_to_indi(): indiclient=IndiClient() indiclient.setServer("localhost",7624) # Ensure the indiserver is running if (not(indiclient.connectServer())): print("No indiserver running on "+indiclient.getHost()+":"+str(indiclient.getPort())+" - Try to run") print(" indiserver indi_sx_ccd") sys.exit(1) return indiclient
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SCPI_sock_connect(ipaddress,port=5025):\n\n try:\n session=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #session.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n #session.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, 0)\n session.connect((ipaddress,port))\n except IOError:\n print( \"Failed to connect to the instrument, pleace check your IP address\" )\n return\n return session", "def connect_to_server(self):\r\n self.client_socket.connect((SERVER_IP, SERVER_PORT))\r\n print('[CLIENT] connected to streamer.')", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "def connect(self):\n sock_version = socket.AF_INET if self.ip_version == 4 else socket.AF_INET6\n with socket.socket(sock_version, socket.SOCK_STREAM) as sock:\n sock.connect((self.server_ip, self.port))\n print(\"Client connected\")\n self.__send_request(\"01\", sock)\n\n while True:\n response = self.__receive_response(sock)\n if len(response) >= 2:\n msg_id_code = int(response[:2])\n if msg_id_code == 2:\n udp_port = self.__request_info_file(response, sock)\n if msg_id_code == 4:\n self.__handle_udp_transfer(self.server_ip, udp_port, sock)\n if msg_id_code == 5:\n print(\"Closing connection\")\n sock.close()\n return 0\n if msg_id_code == 8:\n print(\"Invalid file name. Max size: 15bytes\")\n sock.close()\n return -1", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def connectToServer(self):\n self.client = Client(base_url = self.server)\n self.ping()", "def connect(self):\n print(\"Connecting\")\n self.socket.connect((self.ip, self.port))\n self.startReading()", "def init_connection(srv_ip, srv_port):\n svrsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n srvaddr = (srv_ip, srv_port)\n svrsock.bind(srvaddr)\n print('Laptop IP:', srv_ip)\n print('Laptop Port:', srv_port)\n svrsock.listen(1)\n print('waiting to be connected...')\n clnsock, clnaddr = svrsock.accept()\n print('\\nconnected!\\n')\n print('IOS IP:', clnaddr[0])\n print('IOS PORT:', clnaddr[1])\n svrsock.settimeout(0)\n clnsock.settimeout(0)\n return svrsock, clnsock, clnaddr", "def opensock(ipaddr,port):\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((ipaddr,port))\n \n return s", "def connect(self):\n self.conn.connect()", "def connectToServer(self):\r\n\t\tself.rtspSocket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ttry:\r\n\t\t\tself.rtspSocket_client.connect((self.serverAddr, self.serverPort))\r\n\t\texcept:\r\n\t\t\tprint(\"Fail to connect to server\")", "def connect(self, host, port):\n pass", "def open_tcp_port():\n \n # Open an incoming tcp port to access the cluster endpoint\n try:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def init_connexion():\n connexion = socket(AF_INET, 
SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def start(self):\n self.port = self.conn.evalInServer(server_code.format(key=self.key))", "def establish_connection(self):\n print('Listening...')\n self.socket.listen()\n self.conn, addr = self.socket.accept()\n print('Received connection', addr)", "def start(self):\n # create socket\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 10 minutes for timeout\n self._socket.settimeout(600)\n except socket.error as msg:\n logging.error(\"Can't create socket. Error code: {}, msg: {}\".format(*msg))\n raise\n\n # Open TCP connection\n try:\n self._socket.connect(self.address)\n except socket.error:\n logging.error(\"Can't connect to the server on {}:{}\".format(*self.address))\n raise", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def connect(self):\n self.sock = s.socket(s.AF_INET,s.SOCK_STREAM)\n self.sock.connect((self.remote_host,\n self.remote_port))", "def connect(self, addr):\n self._outbound = True\n rules = firewall.DefaultRule()\n self._state = SocketState(self._got_remote)\n self._endpoint = SocketEndpoint(rules, None)\n self._i2cp = client.Connection(self._endpoint)\n self._i2cp.open()\n while not self._state.is_connected():\n time.sleep(0.1)", "def open(self):\n try:\n if self.verbose:\n print \"Trying to open connection to Leica at \",self.IP_address,\":\",str(self.port)\n self.leicasocket = socket.socket()\n self.leicasocket.connect((self.IP_address,self.port))\n if self.verbose:\n print(\"Connected.\")\n self.connected=True\n return True\n except:\n if self.verbose:\n print \"Error opening connection to \", self.IP_address\n self.connected=False\n return False", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def connect(self):\n\n print(\"Connecting to server at {}:{}\".format(self.hostname, self.port))\n\n self._sock = socket.socket()\n self._sock.setblocking(True)\n self._sock.connect((self.hostname, self.port))\n self._sockfile = self._sock.makefile(encoding=\"utf-8\")\n self._connected = True\n\n if self.password:\n self._sendmsg(\"PASS :{}\".format(self.password))\n self._sendmsg(\"NICK {}\".format(self.nickname))\n self._sendmsg(\"USER {} 0 * :ORE Utility Bot\".format(getpass.getuser()))\n if self.ident_password:\n self._sendmsg(\"PRIVMSG NickServ :identify {}\".format(\n self.ident_password))\n self._sendmsg(\"JOIN {}\".format(\",\".join(self.channels)))", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "def _initRemoteMDSConnection(shotno):\n\tconn = _mds.Connection(_pref._HBT_SERVER_ADDRESS+':8003');\n\tconn.openTree('hbtep2', shotno);\n\treturn conn" ]
[ "0.62255913", "0.62050253", "0.6195879", "0.6171926", "0.6135462", "0.6120585", "0.606939", "0.60673463", "0.602371", "0.6017615", "0.60021055", "0.60011834", "0.5976623", "0.5967753", "0.5963107", "0.59526026", "0.59498817", "0.5947324", "0.5940068", "0.5910819", "0.59102845", "0.5908937", "0.5888914", "0.5888275", "0.5881181", "0.5865311", "0.5864343", "0.5863684", "0.58565176", "0.58555764" ]
0.68877107
0
Connection routine for the CCD (given below in ccd variable). The following CCD properties are accessed. More can be found by going to indilib.org. CONNECTION Switch CCD_EXPOSURE Number CCD1 BLOB CCD_BINNING Number CCD_ABORT_EXPOSURE Number CCD_TEMPERATURE Number CCD_COOLER Switch CCD_FRAME_TYPE Switch
def connect_to_ccd(): ccd="SX CCD SXVR-H694" device_ccd=indiclient.getDevice(ccd) while not(device_ccd): time.sleep(0.5) device_ccd=indiclient.getDevice(ccd) print("Searching for device...") print("Found device") ccd_connect=device_ccd.getSwitch("CONNECTION") while not(ccd_connect): time.sleep(0.5) ccd_connect=device_ccd.getSwitch("CONNECTION") if not(device_ccd.isConnected()): ccd_connect[0].s=PyIndi.ISS_ON # the "CONNECT" switch ccd_connect[1].s=PyIndi.ISS_OFF # the "DISCONNECT" switch indiclient.sendNewSwitch(ccd_connect) ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE") while not(ccd_exposure): time.sleep(0.5) ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE") # inform the indi server that we want to receive the # "CCD1" blob from this device indiclient.setBLOBMode(PyIndi.B_ALSO, ccd, "CCD1") ccd_ccd1=device_ccd.getBLOB("CCD1") while not(ccd_ccd1): time.sleep(0.5) ccd_ccd1=device_ccd.getBLOB("CCD1") # get access to setting the CCD's binning value ccd_bin=device_ccd.getNumber("CCD_BINNING") while not(ccd_bin): time.sleep(0.5) ccd_bin=device_ccd.getNumber("CCD_BINNING") # get access to aborting the CCD's exposure ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE") while not(ccd_abort): time.sleep(0.5) ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE") # get access to the CCD's temperature value ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE") while not(ccd_temp): time.sleep(0.5) ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE") # get access to switching the CCD's cooler on/off ccd_cooler=device_ccd.getSwitch("CCD_COOLER") while not(ccd_cooler): time.sleep(0.5) ccd_cooler=device_ccd.getSwitch("CCD_COOLER") # get access to switching the CCD's image frame type ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE") while not(ccd_frame): time.sleep(0.5) ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE") return ccd_exposure, ccd_ccd1, ccd_bin, ccd_abort, ccd_temp, ccd_cooler, ccd_frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_circ():\n\n set_mode(mode_cc) # set operation mode to CC\n time.sleep(.250)\n set_CC_current(cc_current=0) # set CC mode current to 0 amps\n time.sleep(.1)\n \n oc_vals = get_input_values() # read open circuits levels\n oc_data_point = data_point(oc_vals) # create data point for open circuit measurement\n voc = oc_data_point[3] # open circuit voltage measurement\n print('Open circuit voltage: ', voc)\n write_data_tofile(oc_data_point) # write data to file\n \n return voc", "def ccd(self):\n self.spectrum = self.spectrum", "def setCcdMode(*argv):", "def openCircuit(srv):", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def connect(self):\n self.vgc = Pfeiffer_VGC_Interface(port=self.settings.port.val, debug=self.settings['debug_mode'])\n\n self.settings.ch1_pressure.connect_to_hardware(read_func=getattr(self,'read_ch1_pressure'))\n \n self.settings.ch1_sensor_type.connect_to_hardware(read_func=getattr(self, 'read_ch1_sensor_type'))\n \n self.settings.ch2_pressure.connect_to_hardware(read_func=getattr(self,'read_ch2_pressure'))\n \n self.settings.ch2_sensor_type.connect_to_hardware(read_func=getattr(self, 'read_ch2_sensor_type'))\n \n self.settings.ch3_pressure.connect_to_hardware(read_func=getattr(self, 'read_ch3_pressure'))\n \n self.settings.ch3_sensor_type.connect_to_hardware(read_func=getattr(self, 'read_ch3_sensor_type'))\n \n self.settings.ch1_sensor_type.read_from_hardware()\n self.settings.ch2_sensor_type.read_from_hardware()\n self.settings.ch3_sensor_type.read_from_hardware()", "def short_circ():\n \n set_mode(mode_cv)\n time.sleep(.250)\n set_CV_volts(0.1)\n time.sleep(.250)\n \n sc_vals = get_input_values()\n sc_data_point = data_point(sc_vals)\n jsc = sc_data_point[4]\n print('Short circuit current: ', jsc)\n write_data_tofile(sc_data_point)\n\n return jsc", "def cdi(self):\n from infapy.cdi import CDI\n 
infapy.log.info(\"Created the cdi object to access the iics cdi apis\")\n return CDI(self._v3,self._v2,self._v2BaseURL,self._v3BaseURL,self._v3SessionID,self._v2icSessionID)", "async def _raw_cdc_data(self) -> Dict[str, Any]:\n data = await self._request(\"get\", \"map/cdc\")\n return cast(Dict[str, Any], data)", "def _read_cardiochip(self):\n cur_leadstatus = 0\n sample_count =0\n while self.connected:\n sample_count+=1\n #check for sync bytes\n readbyte = ord(self.ser.read(1))\n #print readbyte, SYNC_BYTE\n if readbyte != SYNC_BYTE:\n continue\n readbyte = ord(self.ser.read(1))\n if readbyte != SYNC_BYTE:\n continue\n\n #parse length byte\n while True:\n pLength = ord(self.ser.read(1))\n if pLength != SYNC_BYTE:\n break\n if pLength > 169:\n continue\n #print \"L: %i\" % pLength\n\n # collect payload bytes\n payload = self.ser.read(pLength)\n payload = [ord(x) for x in payload] #convert to int from string\n #print \"payload: \" + str(payload).strip('[]')\n # ones complement inverse of 8-bit payload sum\n checksum = sum(payload) & 0xFF\n checksum = ~checksum & 0xFF\n\n # catch and verify checksum byte\n chk = ord(self.ser.read(1))\n #print \"chk: \" + str(checksum)\n if chk != checksum:\n print \"checksum error, %i != %i\" % (chk, checksum)\n continue\n\n output = self._parseData(payload)\n\n lead_status = next(( d for d in output if 'leadoff' in d), None)\n if lead_status is not None:\n if cur_leadstatus != lead_status['leadoff']:\n #we have a change\n if lead_status['leadoff']==200:\n print \"LEAD ON\"\n elif lead_status['leadoff']==0:\n print \"LEAD OFF\"\n cur_leadstatus = lead_status['leadoff']\n\n # store the output data in a queue\n # first, create a tuple with the sample index and dict with the timestamp and ecg\n ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)\n if ecgdict is not None and sample_count>self.Fs*2:\n #let's just ignore the first 2 seconds of crappy data\n ecgdict[1]['leadoff'] = cur_leadstatus\n #print ecgdict[1]\n self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys\n\n return", "def main() :\n #fname = '/reg/d/psdm/CXI/cxi35711/hdf5/cxi35711-r0009.h5'\n #dsname = '/Configure:0000/Run:0000/CalibCycle:0000/CsPad::ElementV2/CxiDs1.0:Cspad.0/data'\n #event = 1\n\n fname = '/reg/d/psdm/CXI/cxi37411/hdf5/cxi37411-r0039.h5'\n dsname = '/Configure:0000/Run:0000/CalibCycle:0000/CsPad::ElementV2/CxiDsd.0:Cspad.0/data'\n event = 1\n\n print 'Default CSPad configuration pars:'\n cspadconfig.printCSPadConfigPars()\n\n print '\\nCSPad configuration pars: for fname, dsname, event =\\n', fname, '\\n', dsname, '\\n', event\n cspadconfig.setCSPadConfiguration( fname, dsname, event ) # This will set current CSPad configuration\n cspadconfig.printCSPadConfigPars()", "def CCDpowerup(self):\n #starting drain voltages on CABAC\n drains = {\"OD\": 29, \"GD\": 24, \"RD\": 18}\n self.send_cabac_config(drains)\n\n time.sleep(1)\n\n #starting OG voltage on CABAC\n og = {\"OG\": 3.5}\n self.send_cabac_config(og)\n\n time.sleep(1)\n\n #sets clock rails\n dacs = {\"V_SL\": 0, \"V_SH\": 8.03, \"V_RGL\": 0, \"V_RGH\": 8.03, \"V_PL\": 0, \"V_PH\": 9.13}\n self.fpga.set_clock_voltages(dacs)\n\n time.sleep(1)\n\n #sets clock currents on CABAC\n iclock = {\"IC\": 255}\n self.send_cabac_config(iclock)\n\n time.sleep(1)\n\n #puts current on CS gate\n for stripe in self.stripes:\n self.fpga.set_current_source(0xfff, stripe)\n\n #rewrite default state of sequencer (to avoid reloading functions)\n self.fpga.send_function(0, 
self.seq.get_function(0))\n\n time.sleep(0.1)\n\n #now is the time to the backsubstrate voltage (elsewhere)\n print(\"CCD start-up sequence complete on REB, ready for Back Substrate.\")", "def _add_control_channel(self, attrs):\n _cable_data = {}\n _cable_data[\"crate\"] = self._crate\n _cable_data[\"module\"] = self._module\n _cable_data[\"channel\"] = int(attrs.get('number', \"\"))\n _cable_data[\"name\"] = str(attrs.get('name', \"\"))\n self._data.append(_cable_data)", "def control_change(self, channel, cc, value):\n knob, bank = self.decode_mpd218_cc(cc)\n log.debug(\"Winch control change %d on knob %d bank %d\", cc, knob, bank)\n\n if knob == 1: # Knob #1 on MPD218, use to control resonant frequency\n #self.frequency = 0.05 + 0.1 * value\n self.frequency = 5.00\n self.set_freq_damping()\n\n elif knob == 2: # Knob #2 on on MPD218, use to control damping ratio\n #self.damping_ratio = 0.05 + 0.01 * value\n self.damping_ratio = 1.32\n self.set_freq_damping()", "def get_cdelt_dcflag(hd):\n cdelt = None\n if 'CDELT1' in hd:\n cdelt1 = hd['CDELT1']\n elif 'CD1_1' in hd:\n cdelt1 = hd['CD1_1'] # SDSS style\n\n dc_flag = 0\n if 'DC-FLAG' in hd:\n dc_flag = hd['DC-FLAG']\n elif cdelt1 < 1e-4:\n import warnings\n warnings.warn('WARNING: CDELT1 < 1e-4, Assuming log wavelength scale')\n dc_flag = 1\n\n return cdelt1, dc_flag", "def _get_cbase(self):\n from PSCalib.CalibParsBasePnccdV1 import CalibParsBasePnccdV1\n return CalibParsBasePnccdV1()", "def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc", "def _write_coft(parameters):\n # Format\n fmt = block_to_format[\"COFT\"]\n fmt = str2format(fmt[5])\n\n values = [x for x in parameters[\"connection_history\"]]\n out = write_record(values, fmt, multi=True)\n\n return out", "def connect_dmm2110():\n address = 'USB0::0x05E6::0x2110::8010814::INSTR'\n rm = visa.ResourceManager()\n return rm.open_resource(address)", "def setCSPadConfigurationFromOpenFile( self, h5file, dsname, event=0 ):\n if gm.CSpad2x2ElementIsInTheName(dsname) :\n print 'getCSpadConfiguration(...): This is a CSpad2x2Element. 
Special configuration is not required'\n self.isCSPad2x2 = True\n return\n\n self.h5file = h5file\n self.quadNumsInEvent = self.getQuadNumsInEvent( dsname, event )\n self.indPairsInQuads = self.getIndPairsInQuads( dsname )\n #self.printCSPadConfigPars()", "def d1out():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_OUT)", "def read_s_and_c(self):\n speed = self._previous_speed\n cadence = self._previous_cadence\n for conn, svc in zip(self.cyc_connections, self.cyc_services):\n if not conn.connected:\n speed = cadence = 0\n continue\n values = svc.measurement_values\n if not values:\n if self._cadence_failed >= 3 or self._speed_failed >= 3:\n if self._cadence_failed > 3:\n cadence = 0\n if self._speed_failed > 3:\n speed = 0\n continue\n if not values.last_wheel_event_time:\n continue\n speed = self._compute_speed(values, speed)\n if not values.last_crank_event_time:\n continue\n cadence = self._compute_cadence(values, cadence)\n\n if speed:\n speed = str(speed)[:8]\n if cadence:\n cadence = str(cadence)[:8]\n\n return speed, cadence", "def _process_cdc(self):\n if self.load_type in [LoadType.CDC, LoadType.Full_Load_And_CDC]:\n output_location_for_cdc = os.path.join(self.output_location,\n \"cdc\")\n if not os.path.exists(self.output_location):\n os.mkdir(self.output_location)\n if not os.path.exists(output_location_for_cdc):\n os.mkdir(output_location_for_cdc)\n cdc_init_params = {\n \"output_folder_location\": output_location_for_cdc,\n \"connection_string\": self.connection_string,\n \"table_names\": self.table_names,\n }\n cdc_process = multiprocessing.Process(\n target=PostgresOperator._run_cdc_process,\n args=(self.cdc_plugin_name, cdc_init_params))\n cdc_process.name = \"siirto_cdc_\" + str(uuid.uuid4())\n cdc_process.start()\n return cdc_process\n return None", "def hsdpa_physical_downlink_settings(self):\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 1)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -10.2\r\r\n self.set_pcpich_code_level(carrier=1, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n psch_level = -15.2\r\r\n ssch_level = psch_level\r\r\n pccpch_level = -12.2\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PSCH %s' %psch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:SSCH %s' %ssch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PCCPch %s' %pccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-SCH\", psch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-SCH\", ssch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CCPCH\", pccpch_level))\r\r\n\r\r\n\r\r\n # SCCPH power level and channelisation code\r\r\n sccpch_level = -12.2\r\r\n self.set_dl_chan_code_level(dl_chan='SCCPch', code=2, level_dB=sccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-CCPCH\", sccpch_level))\r\r\n\r\r\n # PICH power level and channelisation code\r\r\n pich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='PICH', code=2, level_dB=pich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"PICH\", pich_level))\r\r\n\r\r\n # AICH power level and channelisation code\r\r\n aich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='AICH', code=3, level_dB=aich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"AICH\", aich_level))\r\r\n\r\r\n # DPCH power and channelisation code\r\r\n dpch_level = -18.2\r\r\n 
self.set_dl_chan_code_level(dl_chan='DPCH', code=3, level_dB=dpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"DPCH\", dpch_level))\r\r\n\r\r\n # F-DPCH power and channelisation ocde\r\r\n fdpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='FDPCh', code=6, level_dB=fdpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"F-DPCH\", fdpch_level))\r\r\n\r\r\n # DPCH enhanced settings\r\r\n self.configure_enhanced_dl_dpch()\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -20.2\r\r\n hssch_level_2 = -20.2\r\r\n self.set_hssch_level(hssch_num=1, carrier=1, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=1, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=1, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=1, codeNum=7)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=1)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=1, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n # unscheduled frame type for HSDPA\r\r\n # possible types are 'DUMMy', 'DTX'\r\r\n self.hsdsch_unsched_frames(carrier=1, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n\r\r\n hsdsch_level = -1.2\r\r\n self.set_hsdsch_level(carrier=1, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(code=1, carrier=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n\r\r\n # // *****************************************************************************\r\r\n # Set level and channelization code of E-AGCH, E-HICH and E-RGCH.\r\r\n # *****************************************************************************\r\r\n eagch_level = -20.2\r\r\n ehich_level = -20.2\r\r\n ergch_level = -20.2\r\r\n self.set_dl_chan_code_level(dl_chan='EAGCh', code=3, level_dB=eagch_level)\r\r\n self.set_dl_chan_code_level(dl_chan='EHICh', code=6, level_dB=ehich_level)\r\r\n self.set_dl_chan_code_level(dl_chan='ERGCh', code=6, level_dB=ergch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-AGCH\", eagch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-HICH\", ehich_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-RGCH\", ergch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_physical_downlink_settings_carrier2()", "def open_device(self):\n\t\t# open device\n\t\t# declare ctype variables\n\t\thdwf = c_int()\n\n\t\tprint \"\\nOpening device\"\n\t\tdwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))\n\n\t\tif hdwf.value == 0:\n\t\t\tprint \"failed to open device\"\n\t\t\tquit()\n\n\t\tself.interface_handler = hdwf\n\n\t\thzSysIn = c_double()\n\t\t#max_buffer_size_in = c_int()\n\n\t\tdwf.FDwfDigitalInInternalClockInfo(self.interface_handler, byref(hzSysIn))\n\t\t#dwf.FDwfDigitalInBufferSizeInfo(self.interface_handler, byref(max_buffer_size_in))\n\n\t\tself.internal_clock_freq = 
hzSysIn.value\n\n\t\t#print \"internal digital in frequency is \" + str(hzSysIn.value)\n\t\t#print \"digital in max buffer size: \" + str(max_buffer_size_in.value)", "def d1in():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_IN)", "def circ_status_event(self, c):\r\n output = [c.event_name, str(c.circ_id), c.status]\r\n if c.path: output.append(\",\".join(c.path))\r\n if c.reason: output.append(\"REASON=\" + c.reason)\r\n if c.remote_reason: output.append(\"REMOTE_REASON=\" + c.remote_reason)\r\n plog(\"DEBUG\", \" \".join(output))\r\n \r\n # Circuits we don't control get built by Tor\r\n if c.circ_id not in self.circuits:\r\n plog(\"DEBUG\", \"Ignoring circuit \" + str(c.circ_id) + \r\n \" (controlled by Tor)\")\r\n return\r\n \r\n # EXTENDED\r\n if c.status == \"EXTENDED\":\r\n # Compute elapsed time\r\n extend_time = c.arrived_at-self.circuits[c.circ_id].last_extended_at\r\n self.circuits[c.circ_id].extend_times.append(extend_time)\r\n plog(\"INFO\", \"Circuit \" + str(c.circ_id) + \" extended in \" + \r\n str(extend_time) + \" sec\")\r\n self.circuits[c.circ_id].last_extended_at = c.arrived_at\r\n \r\n # FAILED & CLOSED\r\n elif c.status == \"FAILED\" or c.status == \"CLOSED\":\r\n PathBuilder.circ_status_event(self, c)\r\n # Check if there are enough circs\r\n self.check_circuit_pool()\r\n return\r\n # BUILT\r\n elif c.status == \"BUILT\":\r\n PathBuilder.circ_status_event(self, c)\r\n # Compute duration by summing up extend_times\r\n circ = self.circuits[c.circ_id]\r\n duration = reduce(lambda x, y: x+y, circ.extend_times, 0.0)\r\n plog(\"INFO\", \"Circuit \" + str(c.circ_id) + \" needed \" + \r\n str(duration) + \" seconds to be built\")\r\n # Save the duration to the circuit for later use\r\n circ.setup_duration = duration\r\n \r\n # OTHER?\r\n else:\r\n # If this was e.g. 
a LAUNCHED\r\n pass", "def circuit(self):\n return jet.Circuit(num_wires=4, dim=2)", "def hsdpa_physical_downlink_settings_carrier2(self):\r\r\n carrier = 2\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 2)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -11\r\r\n self.set_pcpich_code_level(carrier=carrier, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -18.0\r\r\n hssch_level_2 = -18.0\r\r\n self.set_hssch_level(hssch_num=1, carrier=carrier, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=carrier, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=carrier, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=carrier, codeNum=7)\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=carrier)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=carrier, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n self.hsdsch_unsched_frames(carrier=carrier, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n hsdsch_level = -1.6\r\r\n self.set_hsdsch_level(carrier=carrier, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(carrier=carrier, code=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line" ]
[ "0.5991389", "0.5908873", "0.563912", "0.5434294", "0.54057074", "0.53454", "0.53262013", "0.5306355", "0.52807844", "0.52574426", "0.5246854", "0.5221757", "0.5210567", "0.51662695", "0.51509064", "0.5141476", "0.51294994", "0.5121288", "0.5110107", "0.5095082", "0.50932145", "0.5077673", "0.5072095", "0.5043145", "0.5042253", "0.50344265", "0.4938216", "0.49361742", "0.49114913", "0.4899763" ]
0.7094732
0
Find the last numbered image in the current directory.
def last_image(fileDir): lastNum = 0 lastImg = '' # find the name and number of the last image in the current directory for f in os.listdir(fileDir): if os.path.isfile(os.path.join(fileDir, f)): file_name = os.path.splitext(f)[0] file_name2 = file_name[4:] try: file_num = int(file_name2) if file_num > lastNum: lastNum = file_num lastImg = os.path.join(fileDir, f) except ValueError: 'The file name "%s" is not an integer. Skipping' % file_name return lastNum, lastImg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_counter():\n counter = imageNumStart\n if imageNumOn:\n image_ext = \".jpg\"\n search_str = imagePath + \"/*\" + image_ext\n file_prefix_len = len(imagePath + imageNamePrefix)+1\n try:\n # Scan image folder for most recent jpg file\n # and try to extract most recent number counter from file name\n newest = max(glob.iglob(search_str), key=os.path.getctime)\n count_str = newest[file_prefix_len:newest.find(image_ext)]\n print(\"%s INFO : Last Saved Image is %s Try to Convert %s\"\n % (get_now(), newest, count_str))\n counter = int(count_str)+1\n print(\"%s INFO : Next Image Counter is %i\"\n % (get_now(), counter))\n except:\n print(\"%s WARN : Restart Numbering at %i \"\n \"WARNING: Previous Files May be Over Written.\"\n % (get_now(), counter))\n return counter", "def _get_latest_inc(path):\n\n images = [os.path.join(path, image) for image in os.listdir(path) if '.png' in image]\n\n if not images:\n return 0\n else:\n return int(re.search('(?P<inc>\\d+).png$', max(images, key=os.path.getctime)).group('inc'))", "def find_last_image(self) -> List[str]:\n\n soup = self.load_page()\n url_to_down = soup.select('.comicimage')[0].get('src')\n\n filename = str(os.path.basename(url_to_down))\n\n return [url_to_down, filename]", "def LastImage(self, *args):\n return _BRepAlgo.BRepAlgo_Image_LastImage(self, *args)", "def determine_output_ending():\n file_found = False\n idx = 1\n while not file_found:\n if not os.path.isfile(LOG_DIR + \"/output%04d.png\" % (idx)):\n return \"%04d\" % (idx)\n idx += 1", "def find_last_image(self) -> List[str]:\n\n soup = self.load_page()\n\n txt_from_site = soup.select('.full-image-block')\n\n url_to_down = \"http://www.lunarbaboon.com/\" + str(txt_from_site[0])[\n str(txt_from_site).find('/storage'):str(txt_from_site).find(\n 'SQUARESPACE_CACHEVERSION=') + 37]\n filename = str(url_to_down)[35:48] + \".jpg\"\n\n return [url_to_down, filename]", "def get_latest_iteration(path):\n glob = os.path.join(path, '{}_[0-9]*'.format(FILE_PREFIX))\n log_files = tf.io.gfile.glob(glob)\n\n if not log_files:\n raise ValueError('No log data found at {}'.format(path))\n\n def extract_iteration(x):\n return int(x[x.rfind('_') + 1:])\n\n latest_iteration = max(extract_iteration(x) for x in log_files)\n return latest_iteration", "def get_max_imgid(cursor: db.Cursor, table: str) -> int:\r\n res = cursor.execute(f\"SELECT MAX({cng.BBOX_DB_IMGRNR}) FROM {table}\")\r\n maxid: int = res.fetchall()[0][0]\r\n\r\n if maxid is None:\r\n return -1\r\n else:\r\n return maxid", "def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id", "def get_latest_image():\n return sqldb.get_latest_image()", "def _last_exp_id(name, path):\n\n exp_id = 0\n output_dirs = listdir(path)\n exp_dirs = [s for s in output_dirs if name in s]\n if exp_dirs:\n ids = [int(s.split('_')[-1]) for s in exp_dirs]\n exp_id = max(ids)\n return exp_id", "def get_latest_image_from_directory(self, motion_target_dir):\n try:\n # Use a glob generator to find the newest image\n return max(glob.iglob('{0}/*.jpg'.format(motion_target_dir)),\n key=os.path.getctime)\n except ValueError as e:\n # Raise an error if we did not find any images\n raise MotionAlertError(\"Could not find any images in motion \"\n \"target directory: \"\n \"{0}\".format(motion_target_dir))\n except OSError as e:\n # Raise an error if we cannot access the directory.\n raise MotionAlertError(\"Could not find the motion target dir: \"\n \"{0}\".format(e))", "def 
get_output_number(dst):\n data = os.listdir(dst)\n print(data)\n if not data == []:\n last_record = sorted(data)[-1]\n print(last_record)\n hiphen_index = last_record.rfind(\"-\")\n print(hiphen_index)\n print(int(last_record[hiphen_index + 1:]))\n return int(last_record[hiphen_index + 1:])\n return 0", "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)", "def find_last_history_version():\n current_max = -1\n for file in os.listdir(os.getcwd()):\n if len(file) > 12 and file[:12] == \"leg_history_\":\n try:\n current_max = max(int(file[12:]), current_max)\n except ValueError:\n continue\n return current_max", "def get_image_index(name: str):\n base_name = os.path.basename(name)\n nums = pattern.findall(base_name)\n if len(nums) != num_count:\n raise BaseException(f\"can't exact index from the string: {name}\")\n return float(nums[num_sort_index])", "def get_latest_image(dirpath, valid_extensions=('jpg','jpeg','png')):\n\n # get filepaths of all files and dirs in the given dir\n valid_files = [os.path.join(dirpath, filename) for filename in os.listdir(dirpath)]\n # filter out directories, no-extension, and wrong extension files\n valid_files = [f for f in valid_files if '.' in f and \\\n f.rsplit('.',1)[-1] in valid_extensions and os.path.isfile(f)]\n\n if not valid_files:\n raise ValueError(\"No valid images in %s\" % dirpath)\n\n return max(valid_files, key=os.path.getmtime)", "def new_images_index(self):\n first = ct.c_long()\n last = ct.c_long()\n self.lib.GetNumberNewImages(ct.pointer(first), ct.pointer(last))\n\n return (first.value, last.value)", "def get_oldest_image():\n return sqldb.get_oldest_image()", "def getLastPlotfile(outputDir, test):\n \n plotNum = -1\n \n # start by finding the last plotfile\n for file in os.listdir(outputDir):\n if (os.path.isdir(file) and file.startswith(\"%s_plt\" % (test))):\n key = \"_plt\"\n index = string.rfind(file, key)\n plotNum = max(int(file[index+len(key):]), plotNum)\n\n if (plotNum == -1):\n warning(\"WARNING: test did not produce any output\")\n compareFile = \"\"\n else:\n compareFile = \"%s_plt%5.5d\" % (test, plotNum)\n\n return compareFile", "def get_image_column_row(filename):\n row, column = os.path.splitext(filename)[0][-5:].split(\"_\")\n return (int(column) - 1, int(row) - 1)", "def get_image_id(filename):\n del filename\n global GLOBAL_IMG_ID\n GLOBAL_IMG_ID += 1\n return GLOBAL_IMG_ID", "def get_next_img(self, current_img):\n list = self.listImages.previews\n indx_next = (list.index(current_img) + 1) % len(list)\n next_img = list[indx_next]\n return next_img", "def image_id_at(self, i):\n return i", "def get_latest_file(path):\n try:\n latest_iteration = get_latest_iteration(path)\n return os.path.join(path, '{}_{}'.format(FILE_PREFIX, latest_iteration))\n except ValueError:\n return None", "def getNextImage(self):\n self._images = self._api.updateImageNames()\n \n # Get index from local txt file. 
\n # This ensures that the image queue does not reset if the Pola restarts.\n try: \n f = open(\"memoryIndex.txt\", 'r')\n self._currentIndex = int((f.read()))\n f.close()\n except: \n self._currentIndex = -1\n \n self._currentIndex = (self._currentIndex + 1) % len(self._images)\n \n f = open(\"memoryIndex.txt\", 'w')\n f.write(str(self._currentIndex))\n f.close()\n \n \n # If there is an internet connection, go online. If not, get the \"no wifi error\"- image queue\n try:\n urllib.request.urlopen('http://torabodin.com/')\n try: \n imageName = self._api.downloadImage(self._currentIndex)\n print(1, imageName)\n self._image= self.loadImage(imageName, True)\n print (self._image)\n \n except: \n self._image = self.getNextImage()\n \n except:\n self._image = self.loadImage(None, False)\n \n \n return self._image", "def findfigure(name):\n found = None\n if \".\" in name:\n if os.path.exists(name):\n found = name\n elif \".\" not in name:\n for suffix in (\".pdf\", \".eps\", \".ps\", \".JPG\", \".jpg\", \".png\"):\n testfile = name + suffix\n if os.path.exists(testfile):\n found = testfile\n if found is None:\n raise RuntimeError(\"Could not find image file {}\".format(name))\n return found, os.path.splitext(found)[-1]", "def get_latest_image(dirpath, valid_extensions=('jpg','jpeg','png')):\n global processed\n f = True\n valid_files = [os.path.join(dirpath, filename) for filename in os.listdir(dirpath)]\n new_files = [z for z in valid_files if not z in processed]\n processed.extend(new_files)\n #print(new_files,'\\n',processed)\n return new_files\n '''valid_files = [f for f in valid_files if '.' in f and f.rsplit('.',1)[-1] in valid_extensions and os.path.isfile(f)]\n if not valid_files:\n f = True\n else:\n f = False\n return max(valid_files, key=os.path.getmtime)'''", "def get_last_file(base_dir, pattern):\n base_dir = Path(base_dir)\n\n return sorted(base_dir.glob(pattern),\n key=lambda x: x.stat().st_ctime, reverse=True)[0]", "def _get_latest_chapter(self, directory):\n files = os.listdir(directory)\n if files:\n print(\"Last saved chapter: \", files[-1])\n last_chapter = files[-1][:-4]\n return self.indices.get(last_chapter, -1)\n return -1" ]
[ "0.7381124", "0.71163946", "0.6752403", "0.65669954", "0.651449", "0.6455671", "0.6257701", "0.62144595", "0.61207634", "0.6083774", "0.60167074", "0.6005787", "0.5981818", "0.59797287", "0.5971447", "0.597075", "0.59043884", "0.5898508", "0.58921754", "0.5875937", "0.581396", "0.5812782", "0.57936674", "0.57638973", "0.5762741", "0.5760847", "0.57509756", "0.57318723", "0.5723892", "0.5679276" ]
0.7685043
0
Sends an exposure command to the CCD given the type of frame and exposure time. The received BLOB is of FITS type and is written out to a FITS file.
def exposure(frameType, expTime): blobEvent.clear() # set the specified frame type if frameType.lower() == 'light': ccd_frame[0].s = PyIndi.ISS_ON ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'bias': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_ON ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'dark': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_ON ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'flat': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_frame) # set the value for the next exposure ccd_exposure[0].value=expTime indiclient.sendNewNumber(ccd_exposure) # wait for the exposure blobEvent.wait() for blob in ccd_ccd1: # pyindi-client adds a getblobdata() method to IBLOB item # for accessing the contents of the blob, which is a bytearray in Python image_data=blob.getblobdata() # write the byte array out to a FITS file global imgNum global imgName imgNum += 1 fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits' f = open(fileName, 'wb') f.write(image_data) f.close() imgName = fileName return fileName
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expose(self, cmd, expTime, expType):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n time.sleep(expTime + self._exposureOverheadTime())\n\n if cmd:\n cmd.inform('exposureState=\"reading\"')\n\n f = pyfits.open('/home/chyan/mhs/data/mcs/schmidt_fiber_snr400_rmod71.fits')\n image = f[0].data\n # image = numpy.random.normal(self.biasLevel,\n # scale=self.readNoise,\n # size=self.imageSize).astype('u2')\n\n if expType != 'test':\n time.sleep(self._readoutTime())\n return image", "def tcs_exposure_request(image_type, duration = 0, number = 1):\n\n\tvalid_types = ['THERMAL','DARK', 'BIAS', 'FLAT','OBJECT']\n\tvalid = image_type in valid_types\n\n\tif valid:\n\t\timage_type = image_type.lower()\n\t\tif image_type == 'dark':\n\t\t\timage_type = 'thermal'\n\n\t\tif number < 1:\n\t\t\tlogger.error('Invalid number of exposures requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tif duration <0:\n\t\t\tlogger.error('Invalid exposure time requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tcommand_str = 'expose ' + image_type\n\t\tif number != 1:\n\t\t\tcommand_str += ' '+str(number)\n\t\tif image_type != 'bias':\n\t\t\tcommand_str += ' ' + str(duration)\n\t\t\n\t\ttry:\n\t\t\ttcs_respond = send_command(command_str)\n\t\t\n\t\texcept:\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\telse:\n\t\t\t\n\t\t\tcam_temp = get_camera_status()[2]\n\t\t\t#if good_response and cam_temp>-20:\n\t\t\tif float(cam_temp)>-20:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_CCD_WARM\n\t\n\t\t\telse:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_OK\n\t\t\t\n\t\treturn respond\n\n\telse:\n\t\tlogger.error('Invalid image type provided to exposure request '+str(\n\t\t\t\timage_type))\n\t\tprint('Invalid image type provided to exposure request'+str(\n\t\t\timage_type))", "def expose(self, cmd):\n\n expType = cmd.cmd.keywords[0].name\n if expType in ('bias', 'test'):\n expTime = 0.0\n else:\n expTime = cmd.cmd.keywords[\"expTime\"].values[0]\n\n filename, image = self._doExpose(cmd, expTime, expType)\n cmd.finish('exposureState=done')", "def _doExpose(self, cmd, expTime, expType):\n \n image = self.actor.camera.expose(cmd, expTime, expType)\n filename = self.getNextFilename(cmd)\n pyfits.writeto(filename, image, checksum=False, clobber=True)\n cmd.inform(\"filename=%s\" % (qstr(filename)))\n \n return filename, image", "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def Exposure(self, time):\r\n IS_EXPOSURE_CMD_SET_EXPOSURE = 12 #there is a whole list to implement\r\n TIME = DOUBLE(time)\r\n nSizeOfParam = 8\r\n CALL('Exposure', self, \r\n UINT(IS_EXPOSURE_CMD_SET_EXPOSURE), \r\n byref(TIME), \r\n UINT(nSizeOfParam))", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename", "def 
set_exposure(self, exposure):\n self.logger.info(f'Setting exposure to {exposure}')\n self._driver.ExposureTime.SetValue(exposure)", "def setExposureTime(self, cmd, expTime):\n\n pass", "def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)", "def Expose(self, fitsfile, seconds=5):\n # make sure the file has good name\n if not fitsfile.endswith('.fits'):\n fitsfile += '.fits'\n tstamp = datetime.now().strftime('_%y%m%d-%H%M')\n match = re.match(r'.*(_\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d)\\.fits', fitsfile)\n if not match:\n fitsfile = fitsfile[:-5] + tstamp + '.fits'\n elif match.group(1) != tstamp:\n fitsfile = fitsfile[:-17] + tstamp + '.fits'\n \n fitsfile = path.join(self.datapath, fitsfile)\n\n self.lastfile = fitsfile\n log.info(\"Starting new exposure, filename=%s\",\n path.basename(self.lastfile))\n args = ['./CCDDExposeDB.py', str(seconds), fitsfile, \n self.outputMetadata]\n if self.lastimgpath:\n args.append(self.lastimgpath)\n return self._run(args, \n env=dict(IMAGEDB_URI=self.imagedb_uri,\n IMAGEDB_COLLECTION=self.imagedb_collection)\n )", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "async def integrate(self, exposure_time=1):\n if not self.status == ControllerStatus.IDLE:\n raise ArchonError(\"Status must be IDLE to start integrating.\")\n\n await self.set_param(\"IntMS\", int(exposure_time * 1000))\n await self.set_param(\"Exposures\", 1)\n\n self.status = ControllerStatus.EXPOSING", "def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,\n vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,\n frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',\n pixels=512, pixel_size=16e-6):\n self.name = name\n self.img_acq_type = img_acq_type\n\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n\n # supporting camera acquisition settings\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n\n if isinstance(pixels, int):\n self.pixels = (pixels, pixels)\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = (self.pixels[0]*pixel_size, self.pixels[1]*pixel_size)", "def factor_exposure(asset: Asset, risk_model_id: str, factor_name: str, *,\n source: str = None, real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n risk_model = RiskModel(risk_model_id)\n factor = Factor(risk_model_id, factor_name)\n if factor.factor is None or risk_model_id != factor.risk_model_id:\n raise MqValueError('Requested factor not available in requested risk model')\n\n asset_gsid = asset.get_identifiers().get('GSID')\n\n # Establish date interval for data query\n dates = risk_model.get_dates()\n start_date = dt.datetime.strptime(min(dates), \"%Y-%m-%d\").date() if dates else None\n end_date = dt.datetime.strptime(max(dates), \"%Y-%m-%d\").date() if dates else None\n\n # Query data and append pull requested factor exposure\n all_exposures = []\n query_results = risk_model.get_data(\n measures=[Measure.Factor_Name, Measure.Universe_Factor_Exposure, Measure.Asset_Universe],\n start_date=start_date,\n end_date=end_date,\n assets=DataAssetsRequest(identifier=AssetUniverseIdentifier.gsid, universe=[asset_gsid])).get('results', [])\n for result in query_results:\n if result.get('date') in dates:\n exposures = result.get('assetData', {}).get('factorExposure', [])\n if exposures:\n all_exposures.append(\n {'date': result['date'],\n 'factorExposure': exposures[0].get(factor.factor.identifier)})\n\n # Create and return timeseries\n df = pd.DataFrame(all_exposures)\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, QueryType.FACTOR_EXPOSURE)", "def wfits(self, filename=None):\n with self.lock:\n dark = self.dark\n if not filename:\n if dark != 0:\n filename = self.getNextFilename(\"dark\")\n else:\n filename = self.getNextFilename(\"object\")\n with self.lock:\n if(self.data.size == 0):\n raise FliError(\"No image available\")\n hdu = 
pyfits.PrimaryHDU(self.data)\n hdr = hdu.header\n with self.lock:\n hdr.set('DATE', self.timestamp, 'exposure begin date')\n hdr.set('INSTRUME', self.devname, 'this instrument')\n hdr.set('SERIAL', self.devsn, 'serial number')\n hdr.set('EXPTIME', self.exptime, 'exposure time (ms)')\n hdr.set('VBIN', self.vbin, 'vertical binning')\n hdr.set('HBIN', self.hbin, 'horizontal binning')\n hdr.set('CCD-TEMP', self.temp, 'CCD temperature')\n if dark != 0:\n hdr.set('SHUTTER', 'CLOSE', 'shutter status')\n else:\n hdr.set('SHUTTER', 'OPEN', 'shutter status')\n hdr.set('CCDAREA', '[%d:%d,%d:%d]' % self.expArea, 'image area')\n hdu.writeto(filename, overwrite=True, checksum=True)\n with self.lock:\n self.filename = filename", "def create_exposure(event_class,event_type,egy,cth):\n\n if isinstance(event_type,int):\n event_type = evtype_string[event_type]\n \n irf_factory=pyIrfLoader.IrfsFactory.instance()\n irf = irf_factory.create('%s::%s'%(event_class,event_type))\n\n irf.aeff().setPhiDependence(False)\n \n theta = np.degrees(np.arccos(cth))\n \n # Exposure Matrix\n # Dimensions are Etrue and incidence angle\n m = np.zeros((len(egy),len(cth)))\n\n for i, x in enumerate(egy):\n for j, y in enumerate(theta): \n m[i,j] = irf.aeff().value(10**x,y,0.0)\n\n return m", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n noise_mean=self.noise_mean, 
noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def take_image(self, shutter='normal', exptime=0.0,\n readout=2.0, save_as=\"\", timeout=None):\n\n s = time.time()\n parameter_list = []\n readout_time = 5\n exptime_ms = 0\n\n print(self.opt.getParameter('TimeStamps'), 'timestamp')\n # 1. Set the shutter state\n shutter_return = self._set_shutter(shutter)\n if shutter_return:\n parameter_list += shutter_return\n else:\n return {'elaptime': time.time()-s,\n 'error': \"Error setting shutter state\"}\n\n # 2. Convert exposure time to ms`\n try:\n exptime_ms = int(float(exptime) * 1000)\n logger.info(\"Converting exposure time %(exptime)ss\"\n \" to %(exptime_ms)s\"\n \"milliseconds\", {'exptime': exptime,\n 'exptime_ms': exptime_ms})\n parameter_list.append(['ExposureTime', exptime_ms])\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error setting exposure time\", exc_info=True)\n\n # 3. Set the readout speed\n logger.info(\"Setting readout speed to: %s\", readout)\n if readout not in self.AdcSpeed_States:\n logger.error(\"Readout speed '%s' is not valid\", readout)\n return {'elaptime': time.time()-s,\n 'error': \"%s not in AdcSpeed states\" % readout}\n parameter_list.append(['AdcSpeed', readout])\n\n # 4. Set parameters and get readout time\n try:\n logger.info(\"Sending configuration to camera\")\n readout_time = self._set_parameters(parameter_list)\n r = int(readout_time) / 1000\n logger.info(\"Expected readout time=%ss\", r)\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error setting parameters\", exc_info=True)\n\n # 5. Set the timeout return for the camera\n if not timeout:\n timeout = int(int(readout_time) + exptime_ms + 10000)\n else:\n timeout = 10000000\n\n # 6. 
Get the exposure start time to use for the naming convention\n start_time = datetime.datetime.utcnow()\n self.lastExposed = start_time\n logger.info(\"Starting %(camPrefix)s exposure\",\n {'camPrefix': self.camPrefix})\n try:\n data = self.opt.readNFrames(N=1, timeout=timeout)[0][0]\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Unable to get camera data\", exc_info=True)\n return {'elaptime': -1*(time.time()-s),\n 'error': \"Failed to gather data from camera\",\n 'send_alert': True}\n\n logger.info(\"Readout completed\")\n logger.debug(\"Took: %s\", time.time() - s)\n\n if not save_as:\n start_exp_time = start_time.strftime(\"%Y%m%d_%H_%M_%S\")\n # Now make sure the utdate directory exists\n if not os.path.exists(os.path.join(self.outputDir,\n start_exp_time[:8])):\n logger.info(\"Making directory: %s\", os.path.join(self.outputDir,\n start_exp_time[:8]))\n\n os.mkdir(os.path.join(self.outputDir, start_exp_time[:8]))\n\n save_as = os.path.join(self.outputDir, start_exp_time[:8], self.camPrefix+start_exp_time+'.fits')\n\n try:\n datetimestr = start_time.isoformat()\n datestr, timestr = datetimestr.split('T')\n hdu = fits.PrimaryHDU(data, uint=False)\n hdu.scale('int16', bzero=32768)\n hdu.header.set(\"EXPTIME\", float(exptime), \"Exposure Time in seconds\")\n hdu.header.set(\"ADCSPEED\", readout, \"Readout speed in MHz\")\n hdu.header.set(\"TEMP\", self.opt.getParameter(\"SensorTemperatureReading\"),\n \"Detector temp in deg C\")\n hdu.header.set(\"GAIN_SET\", 2, \"Gain mode\")\n hdu.header.set(\"ADC\", 1, \"ADC Quality\")\n hdu.header.set(\"MODEL\", 22, \"Instrument Mode Number\")\n hdu.header.set(\"INTERFC\", \"USB\", \"Instrument Interface\")\n hdu.header.set(\"SNSR_NM\", \"E2V 2048 x 2048 (CCD 42-40)(B)\", \"Sensor Name\")\n hdu.header.set(\"SER_NO\", self.serialNumber, \"Serial Number\")\n hdu.header.set(\"TELESCOP\", self.telescope, \"Telescope ID\")\n hdu.header.set(\"GAIN\", self.gain, \"Gain\")\n hdu.header.set(\"CAM_NAME\", \"%s Cam\" % self.camPrefix.upper(), \"Camera Name\")\n hdu.header.set(\"INSTRUME\", \"SEDM-P60\", \"Camera Name\")\n hdu.header.set(\"UTC\", start_time.isoformat(), \"UT-Shutter Open\")\n hdu.header.set(\"END_SHUT\", datetime.datetime.utcnow().isoformat(), \"Shutter Close Time\")\n hdu.header.set(\"OBSDATE\", datestr, \"UT Start Date\")\n hdu.header.set(\"OBSTIME\", timestr, \"UT Start Time\")\n hdu.header.set(\"CRPIX1\", self.crpix1, \"Center X pixel\")\n hdu.header.set(\"CRPIX2\", self.crpix2, \"Center Y pixel\")\n hdu.header.set(\"CDELT1\", self.cdelt1, self.cdelt1_comment)\n hdu.header.set(\"CDELT2\", self.cdelt2, self.cdelt2_comment)\n hdu.header.set(\"CTYPE1\", self.ctype1)\n hdu.header.set(\"CTYPE2\", self.ctype2)\n hdu.writeto(save_as, output_verify=\"fix\", )\n logger.info(\"%s created\", save_as)\n if self.send_to_remote:\n ret = self.transfer.send(save_as)\n if 'data' in ret:\n save_as = ret['data']\n return {'elaptime': time.time()-s, 'data': save_as}\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error writing data to disk\", exc_info=True)\n return {'elaptime': -1*(time.time()-s),\n 'error': 'Error writing file to disk:' % str(e)}", "def observeField(target, exposure):\n\n status = 2\n real_exposure = exposure + np.random.normal(0.0, 20.0)\n realSN2 = target['DESsn2'] + np.random.uniform(0.0, 1.0)\n\n return status, real_exposure, realSN2", "def select_exposure(self):\n exp1_selected = self.exp1_radio.isChecked()\n\n if self.recording_sequence:\n self.record_sequence() # stop current recording\n\n if 
exp1_selected: # then exp1\n ifi_ndx = self.exp1_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp1_select.currentIndex(), ifi_ndx)\n else:\n ifi_ndx = self.exp2_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp2_select.currentIndex(), ifi_ndx)\n\n temp = list(self.dpar.iwindow_toggle_save)\n self.dpar.iwindow_toggle_save = list(self.dpar.iwindow[0])\n self.dpar.iwindow[0] = temp\n self._update_scrollbars()\n\n self.rec_seq_button.setEnabled(ifi_ndx > 0)\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def factor_exposure(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_EXPOSURE)", "def exposureCallback(self, config):\n rospy.loginfo('Set exposure: {}'.format(config['exposure']))", "def __exp1_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp1_radio.setChecked(True)\n self.exp1_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "async def expose(obj, exptime, outfile, overwrite):\n\n async with obj['camera_system'] as fli:\n\n log.debug('starting camera exposures ... ')\n exposures = await asyncio.gather(*[camera.expose(exptime)\n for camera in fli.cameras],\n return_exceptions=False)\n\n log.debug('writing images to disk ... ')\n writers = []\n for exposure in exposures:\n if outfile:\n outfile = outfile.format(camera=exposure.camera)\n writers.append(exposure.write(filename=outfile,\n overwrite=overwrite))\n else:\n writers.append(exposure.write(overwrite=overwrite))\n\n await asyncio.gather(*writers, return_exceptions=True)", "def ptc_acquisition(self, explow=0.1, exphigh=2, expdelta=0.1, laserchannel = 2, lasercurrent=45.0):\n\n #\n self.laser.select(laserchannel)\n self.laser.setCurrent(laserchannel, lasercurrent)\n self.laser.enable()\n\n #self.powerup_CCD()\n self.reb.set_testtype('PTC')\n\n #self.DKD.setup_current_measurements(DKD_range)\n self.PhD.setup_current_measurements(2e-8)\n\n # Create the logging summary file\n summaryfile = os.path.join(eodir, 'summary.log')\n f = open(summaryfile, 'a')\n\n print >>f, \"# power\\t exposure time\\t file name\"\n\n effpow = self.laser.getPower(laserchannel)\n # First take bias frames\n self.log(\"Taking bias\")\n m = self.execute_reb_sequence('ClearBias', 0, 20, True )\n #to have only useful channels:\n fname = \"%s_ptc_bias_%s.fits\" % (serno, self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, 0, fname\n\n for t in np.arange(explow, exphigh+expdelta, expdelta):\n # pair of flats\n for numpair in [1, 2]:\n effpow = self.laser.getPower(laserchannel)\n m = self.execute_reb_sequence('Acquisition', t)\n #to have only useful channels:\n fname = \"%s_ptc_flat%d_%05d_%s.fits\" % (serno, numpair, int(t*100), self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, t, fname\n\n f.close()\n\n # Shutting down (not the lamp by default)\n self.laser.disable()\n #self.shutdown_CCD()\n # p = self.reb.start_waiting_sequence()", "def 
configure_exposure(cam,exposure):\n\n #print(\"*** CONFIGURING EXPOSURE ***\\n\")\n\n try:\n result = True\n\n # Turn off automatic exposure mode\n #\n # *** NOTES ***\n # Automatic exposure prevents the manual configuration of exposure\n # times and needs to be turned off for this example. Enumerations\n # representing entry nodes have been added to QuickSpin. This allows\n # for the much easier setting of enumeration nodes to new values.\n #\n # The naming convention of QuickSpin enums is the name of the\n # enumeration node followed by an underscore and the symbolic of\n # the entry node. Selecting \"Off\" on the \"ExposureAuto\" node is\n # thus named \"ExposureAuto_Off\".\n #\n # *** LATER ***\n # Exposure time can be set automatically or manually as needed. This\n # example turns automatic exposure off to set it manually and back\n # on to return the camera to its default state.\n\n \n\n # Set exposure time manually; exposure time recorded in microseconds\n #\n # *** NOTES ***\n # Notice that the node is checked for availability and writability\n # prior to the setting of the node. In QuickSpin, availability and\n # writability are ensured by checking the access mode.\n #\n # Further, it is ensured that the desired exposure time does not exceed\n # the maximum. Exposure time is counted in microseconds - this can be\n # found out either by retrieving the unit with the GetUnit() method or\n # by checking SpinView.\n\n if cam.ExposureTime.GetAccessMode() != PySpin.RW:\n print(\"Unable to set exposure time. Aborting...\")\n return False\n\n # Ensure desired exposure time does not exceed the maximum\n exposure_time_to_set = exposure\n exposure_time_to_set = min(cam.ExposureTime.GetMax(), exposure_time_to_set)\n cam.ExposureTime.SetValue(exposure_time_to_set)\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False\n\n return result", "def __exp2_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp2_radio.setChecked(True)\n self.exp2_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def exp_scan(self, exposure_time_list):\n self.generic_scan(self.exp, exposure_time_list)", "def get_exposure_value():\n validate(request.json, 'exposureValueRequestSchema', 'swagger/getExposureValue.yml')\n logging.info(\"get_exposure_value({0})\".format(request.json))\n return database.get_exposure_value(loc=request.json['loc'],\n stime=ExposureUtil.to_timestamp(request.json['stime']),\n etime=ExposureUtil.to_timestamp(request.json['etime']),\n tres=request.json['tres'],\n tstat=request.json['tstat'])" ]
[ "0.66172343", "0.66024214", "0.61997694", "0.6147396", "0.6001152", "0.5911744", "0.5895627", "0.5890128", "0.5692038", "0.5651894", "0.5623061", "0.56096864", "0.54805845", "0.54146963", "0.5291824", "0.5205684", "0.5177268", "0.51427567", "0.51357037", "0.5105987", "0.5099826", "0.50125957", "0.49856898", "0.49391565", "0.49388713", "0.4927951", "0.49048397", "0.48570973", "0.4849138", "0.48462513" ]
0.7639681
0
This is the method that receives the client's data and decides what to do with it. It runs in a loop to always be accepting new connections. If the data is 'status', the CCD status is returned. If the data is 'stop', the current exposure is stopped. If the data is anything else, a new thread is created and the data is sent to handle_command().
async def handle_client(reader, writer): request = None # loop to continually handle incoming data while request != 'quit': request = (await reader.read(255)).decode('utf8') print(request.encode('utf8')) #log.info('COMMAND = '+request) writer.write(('COMMAND = '+request.upper()+'\n').encode('utf8')) response = 'BAD' # check if data is empty, a status query, or potential command dataDec = request if dataDec == '': break elif 'status' in dataDec.lower(): response = 'OK' # check if the command thread is running try: if exposureState() > 0: response = response + '\nBUSY' else: response = response + '\nIDLE' except: response = response + '\nIDLE' if ccd_frame[0].s == PyIndi.ISS_ON: frameType = 'LIGHT' elif ccd_frame[1].s == PyIndi.ISS_ON: frameType = 'BIAS' elif ccd_frame[2].s == PyIndi.ISS_ON: frameType = 'DARK' elif ccd_frame[3].s == PyIndi.ISS_ON: frameType = 'FLAT' response = response+\ '\nBIN MODE = '+str(ccd_bin[0].value)+'x'+str(ccd_bin[1].value)+\ '\nCCD TEMP = '+str(ccd_temp[0].value)+\ 'C\nLAST FRAME TYPE = '+str(frameType)+\ '\nFILE DIR = '+str(fileDir)+\ '\nLAST IMAGE = '+str(imgName) # send current status to open connection & log it #log.info('RESPONSE: '+response) writer.write((response+'\nDONE\n').encode('utf-8')) elif 'stop' in dataDec.lower(): # check if the command thread is running try: if comThread.is_alive(): response = 'OK: aborting exposure' ccd_abort[0].s=PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_abort) blobEvent.set() #Ends the currently running thread. response = response+'\nExposure Aborted' else: response = 'OK: idle' except: response = 'OK: idle' # send current status to open connection & log it #log.info('RESPONSE = '+response) writer.write((response+'\nDONE\n').encode('utf-8')) else: # check if the command thread is running, may fail if not created yet, hence try/except try: if comThread.is_alive(): response = 'BAD: busy' # send current status to open connection & log it #log.info('RESPONSE = '+response) writer.write((response+'\nDONE\n').encode('utf-8')) else: # create a new thread for the command comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,)) comThread.start() except: # create a new thread for the command comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,)) comThread.start() await writer.drain() writer.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleRecvData(self, data):\n\n\t\t#Look for commands\n\t\tif data == 'Hello':\n\t\t\t#Inform client it is 'connected'\n\t\t\tself.transmit(\"Welcome\")\n\n\t\telif data == 'kill':\t\n\t\t\t#Stop the server running\n\t\t\tself.running = False\n\n\t\telif data == 'control':\n\t\t\t#Print out if in control of car\n\t\t\tprint(\"Control query\")\n\t\t\tif self.arduino:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: True\")\n\t\t\telse:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: False\")\n\n\t\telif data == 'Hello?':\n\t\t\t#The client is still alive therefore set missing count to 0\n\t\t\tself.missingCount = 0\n\n\t\t#Look for Arduino Data\n\t\telif self.arduino:\n\t\t\t#Check if controlling the car and it's a valid car command\n\t\t\tif self.arduino.readPack(data): \n\t\t\t\tprint(self.address)\n\t\t\t\tprint(\"Sent to arduino: %s\" % data)\n\t\t\telse:\n\t\t\t\t#Print out other data\n\t\t\t\tprint(\"Not valid Arduino data\")\n\t\t\t\tprint(self.address)\n\t\t\t\tprint(data)\n\n\t\telse:\n\t\t\t#All other data print out\n\t\t\tprint(self.address)\n\t\t\tprint(data)", "def handle(self):\n print \"Client %s:%s connected\" % self.client_address\n self.controller = False\n\n try:\n while not self.server.is_shutting_down.is_set():\n command = self.rfile.readline().strip()\n\n # meta commands: these control the meta operations\n # they do not drive the robot\n if not command:\n self.send_output('ok', '')\n continue\n\n if command == 'exit':\n self.send_output('ok', 'done')\n break\n\n if command == 'shutdown':\n self.send_output('ok', 'shutdown')\n self.server.shutdown()\n # the main thread will shut down the robot\n break\n\n if command == 'control':\n if self.controller:\n self.send_output('ok', 'was already a controller')\n else:\n self.controller = self.server.control_lock.acquire(blocking = 0)\n if self.controller:\n self.send_output('ok', 'acquired control lock')\n else:\n self.send_output('error', 'cannot acquire control lock')\n\n continue\n\n try:\n output = self.process_command(command)\n\n # got an invalid command (could not parse\n except CommandError, e:\n self.send_output('invalid', e.message)\n # driver rejected the command, but not due to an error\n except (drivers.common.ParameterError, drivers.common.StoppedError), e:\n self.send_output('rejected', e.message)\n # unknown error -- send error to the client, and log the exception\n except Exception, e:\n traceback.print_exc()\n self.send_output('error', str(e))\n else:\n self.send_output('ok', output)\n self.server.last_request = time.time()\n\n finally:\n output = [\"%s:%s disconnected\" % self.client_address]\n if self.controller:\n self.server.control_lock.release()\n self.server.robot.stop()\n output.append(\"; robot stopped. 
no more controlling client\")\n else:\n output.append(\"; was a viewer\")\n\n print \"\".join(output)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def run(self):\n self._create_command_socket()\n\n self._is_running = True\n\n # self._clear_buffer(data_socket)\n\n # prevent recv from block indefinitely\n self._socket.settimeout(DataThread.TIMEOUT)\n\n while self._is_running:\n try:\n data = self._socket.recv(SIZE_BUFFER)\n if len(data):\n self._adapter.process_message(data)\n except (KeyboardInterrupt, SystemExit, OSError):\n print('Exiting data socket')\n\n except socket.timeout:\n print('NatNetClient command socket timeout!')\n continue\n\n self._close_socket()", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def run(self):\n\n try:\n while True:\n self.log.info(\"Waiting for a connection...\")\n self.mc.events.post('client_disconnected')\n self.connection, client_address = self.socket.accept()\n\n self.log.info(\"Received connection from: %s:%s\",\n client_address[0], client_address[1])\n self.mc.events.post('client_connected',\n address=client_address[0],\n port=client_address[1])\n\n # Receive the data in small chunks and retransmit it\n while True:\n try:\n data = self.connection.recv(4096)\n if data:\n commands = data.split(\"\\n\")\n for cmd in commands:\n if cmd:\n self.process_received_message(cmd)\n else:\n # no more data\n break\n\n except:\n if self.mc.config['mediacontroller']['exit_on_disconnect']:\n self.mc.shutdown()\n else:\n break\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)", "def run(self):\n # pylint: disable=unused-variable\n try:\n LOGGER.debug(\"starting session from client %s\",\n str(self._client_address))\n self._data_handler.on_start(self)\n read_list = [self._sock]\n\n self._state.transit(sitcpy.THREAD_RUNNING)\n while self._state() == sitcpy.THREAD_RUNNING:\n 
try:\n readable, _, _ = select.select(read_list, [], [], 0.1)\n if self._sock in readable:\n\n # Receive data.\n byte_data = self._sock.recv(self._max_buff)\n if not byte_data:\n LOGGER.error(\"readable socket with no data. closing session\")\n break\n byte_data = self._rest_byte_data + byte_data if self._rest_byte_data else byte_data\n\n # Find delimiter position\n delimiter_pos = self._data_handler.find_delimiter_position(byte_data)\n\n if delimiter_pos >= 0:\n # If delimiter found.\n if not self._data_handler.on_data(self, byte_data[:delimiter_pos]):\n break\n self._rest_byte_data = byte_data[delimiter_pos:]\n else:\n # If delimiter not found.\n self._rest_byte_data = byte_data\n\n self._data_handler.on_idle(self)\n except Exception as exc:\n LOGGER.error(\"Exception at SessionThread.run : %s\", str(exc))\n raise\n del read_list[:]\n finally:\n self.close()\n self._state.transit(sitcpy.THREAD_STOPPED)", "def main(self):\n while True:\n if not self.data_server_command.empty():\n command_data_server = self.data_server_command.get()\n if command_data_server[0] == 4:\n thread.start_new_thread(self.get_file, (command_data_server[1],))\n else:\n self.data_server_command_def[command_data_server[0]](command_data_server[1])", "def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def _dispatch_from_client_request(self):\n # Listen for client connection\n self._from_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._from_client_request], [], [self._from_client_request], 0.1)\n\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n\n client_name_read, _, _ = select([client_conn], [], [client_conn])\n if client_name_read:\n client_name = json.loads(client_name_read[0].recv(cfg.HEADER).decode('utf-8'))\n else:\n print(\"Connection closed\")\n continue\n\n self._thread_lock.acquire()\n self._from_client_connections[client_conn] = client_name\n self._state[client_name] = 0\n self._thread_lock.release()\n\n print(\"Receiving commands from [\" + client_name + \", \" + client_addr[0] + \", \" + str(client_addr[1]) + ']')", "def run(self):\n self._create_data_socket()\n\n self._is_running = True\n\n # self._clear_buffer(data_socket)\n\n # prevent recv from block indefinitely\n self._socket.settimeout(DataThread.TIMEOUT)\n\n while self._is_running:\n try:\n data = self._socket.recv(SIZE_BUFFER)\n if len(data):\n self._adapter.process_message(data)\n except (KeyboardInterrupt, 
SystemExit, OSError):\n print('Exiting data socket')\n\n except socket.timeout:\n print('NatNetClient data socket timeout!')\n continue\n\n self._close_socket()", "def receive_data_from_server(self):\n while not self._stop_receive.is_set():\n # seems irrelevant now\n # if not self._pause_receive.is_set():\n try:\n # We are doing handshaking, so this is fine\n _server_reply = self.receive(True)\n if _server_reply:\n self._reply_queue.append(_server_reply)\n self.callback_client_receive(_server_reply)\n except MastermindErrorClient:\n logger.error(\"Mastermind Error:\")\n info = sys.exc_info()\n traceback.print_exception(*info)\n self.callback_disconnect()\n except OSError:\n logger.warning(\"OS ERROR, disconnecting client.\")\n info = sys.exc_info()\n traceback.print_exception(*info)\n self.callback_disconnect()", "def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []", "def listen(self):\n self.logger.info(\"Control server: {}\".format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = \"Not a JSON message!\"\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info(\"Exiting control server. Bye!\")\n self.clean_up()\n sys.exit(0)", "def run(self):\n patterns = self.conn.dispatch_patterns()\n\n while not self.disconnect.is_set():\n try:\n data = self.conn.get_data() # returns empty string if times out\n if data:\n self.conn.dispatch_data(data, patterns)\n\n command = self.command_queue.get_nowait()\n self.process_command(command)\n except DisconnectedException:\n self.logger.info('Disconnected from server. 
Reconnecting.')\n self.conn.close()\n self.connect_and_join_channels(self.channels)\n continue\n except Queue.Empty:\n continue", "def enter_read_loop(self):\n\n try:\n while True:\n try:\n request = DAPBaseMessage.recv(self._current_client)\n except Exception as e:\n # TODO send error\n traceback.print_exc()\n continue\n\n if request is None:\n # client terminated without termination request\n return\n try:\n self.resolve_message(request)\n except Exception as e:\n # TODO send error\n traceback.print_exc()\n self.next_seq += 1\n DAPErrorResponse.create(self.next_seq, rq.seq, False, message=\"Error\").send(self._current_client)\n continue\n\n if self._current_client is None:\n self._ready_for_events = False\n return # terminated\n\n except BaseException as e:\n # failure while communicating\n traceback.print_exc()\n pass\n finally:\n # final handler, clear active client\n self._current_client = None\n self._ready_for_events = False\n\n debugger.reset()", "def run(self):\n os.chdir(ServerFolder)\n while True:\n request = self.client_socket.recv(1024).decode().strip()\n if not request:\n print(\"Disconnecting from client {}:{}\".format(\n self.client_ip, self.client_port))\n self.client_socket.shutdown(socket.SHUT_RDWR)\n self.client_socket.close()\n break\n request = request.split(\",\")\n\n if request[0] == \"LS\":\n self.ls()\n elif request[0] == \"PWD\":\n self.pwd()\n elif request[0] == \"CD\":\n self.cd(request[1])\n elif request[0] == \"MKDIR\":\n self.mkdir(request[1])\n elif request[0] == \"RMDIR\":\n self.rmdir(request[1])\n elif request[0] == \"RM\":\n self.rm(request[1])\n\n elif request[0] == \"rget\" and len(request[1:]) == 1:\n self.send_file(*request[1:])\n\n elif request[0] == \"rput\" and len(request[1:]) == 2:\n self.receive_file(*request[1:])", "def start(self) -> None:\n data = b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def run(self):\n inputs = [self.server]\n\n while self.running:\n print '1'\n try:\n readable, writeable, exceptional = \\\n select.select(inputs, [], [])\n except select.error, e:\n print 'select:error[%s]' % e.message\n break\n\n for sock in readable:\n print '2'\n if sock == self.server:\n client, address = self.server.accept()\n client.setblocking(0)\n inputs.append(client)\n # self.outputs.append(client)\n\n print 'Client[%s] connected!' 
% address[0]\n self.clients[client] = address[0]\n\n else:\n print '3'\n self.recv_data(sock)", "def listen_to_connection(self, conn):\n with conn:\n print(\"Connected\")\n while self.running:\n data = conn.recv(32)\n if not data: \n return\n \n print(\"Recived Data:\"+str(data))\n self.__update(data)", "def __async_read_callback(self, data, err) -> None:\n if err != 0:\n logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()", "def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)", "def run(self):\n\n print('Listening for client connections...')\n\n while not self.shutdownEvent.is_set():\n readyToRead, readyToWrite, inputError = select.select(self._socketList, [], [], self._selectTimeout)\n\n # Iterate over input sockets\n for sock in readyToRead:\n # Received new connection request\n if sock is self._serverSocket:\n print('Received connection request. 
Establishing connection with client.')\n\n # Accept the connection and append it to the socket list\n clientSocket, address = self._serverSocket.accept()\n\n #TODO: Add this if there's a timeout blocking issue, or make the sockets non-blocking\n #clientSocket.settimeout(0.5)\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.append(clientSocket)\n finally:\n self._socketListMutex.release()\n # Received message from client\n else:\n # Read a message off of the socket\n msgData = MessageHandler.recvMsg(sock)\n\n # Process the message\n if msgData is not None:\n self.__processMsg(sock, msgData)\n # The client disconnected\n else:\n print('Client disconnected')\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.remove(sock)\n finally:\n self._socketListMutex.release()\n\n sock.close()\n\n # Cleanup\n self.__shutdown()", "def listen(self):\n\n\t\twhile self.running:\n\t\t\t#Wait for server to inform you there is data\n\t\t\tself.rxEvt.wait()\n\t\t\t\n\t\t\ttry:\n\t\t\t\t#See if recieved packet is actually latest from client\n\t\t\t\tif self.rxData[len(self.rxData)-1][0] >= self.rxLatest:\n\n\t\t\t\t\t#Update latest and pass data to data handler\n\t\t\t\t\tself.rxLatest = self.rxData[len(self.rxData)-1][0]\n\t\t\t\t\tself.handleRecvData(self.rxData[len(self.rxData)-1][1])\n\t\t\n\t\t\t\t\t#Clear event object so other clientHandlers begin waiting again\n\t\t\t\t\tself.rxEvt.clear()\n\n\t\t\texcept IndexError, e:\n\t\t\t\tprint(\"Index error on ServerClient listen\\nCarrying on Regardless\")", "def run(self):\n to_client_request_thread = threading.Thread(target=self._dispatch_to_client_request, daemon=True)\n to_client_request_thread.start()\n\n from_client_request_thread = threading.Thread(target=self._dispatch_from_client_request, daemon=True)\n from_client_request_thread.start()\n\n from_client_commands_thread = threading.Thread(target=self._from_client_commands, daemon=True)\n from_client_commands_thread.start()\n\n to_client_update_state_thread = threading.Thread(target=self._to_client_update_state, daemon=True)\n to_client_update_state_thread.start()\n\n server_control_thread = threading.Thread(target=self._server_control, daemon=True)\n server_control_thread.start()\n\n # Wait for threads to finish\n to_client_request_thread.join()\n from_client_request_thread.join()\n from_client_commands_thread.join()\n to_client_update_state_thread.join()\n server_control_thread.join()\n \n # Close server connection\n self._to_client_request.close()\n self._from_client_request.close()", "def data_received(self, data):\n # This may seem strange; feeding all bytes received to the **writer**,\n # and, only if they test positive, duplicating to the **reader**.\n #\n # The writer receives a copy of all raw bytes because, as an IAC\n # interpreter, it may likely **write** a responding reply.\n self._last_received = datetime.datetime.now()\n\n cmd_received = False\n for byte in data:\n try:\n recv_inband = self.writer.feed_byte(bytes([byte]))\n except:\n self._log_exception(logger.warning, *sys.exc_info())\n else:\n if recv_inband:\n # forward to reader (shell).\n self.reader.feed_data(bytes([byte]))\n\n # becomes True if any out of band data is received.\n cmd_received = cmd_received or not recv_inband\n\n # until negotiation is complete, re-check negotiation aggressively\n # upon receipt of any command byte.\n if not self._waiter_connected.done() and cmd_received:\n self._check_negotiation_timer()", "def runner(socket,id):\n socket.send('proceed')\n \n while True:\n data = 
socket.recv()\n print(\"id(\",id,\")=\",data)\n if not data: break\n \n elif data == \"info\":\n run_time = time.ctime(start_time)\n statusMessage = \"SERVER STATUS: Running...\\nInterface id:\"+str(id)+\"\\nBeen running since: \"+str(run_time)+\"\\n\"\n socket.send(statusMessage)\n \n elif data == \"plug\": #talk to plugin? aka. other commands \n pass\n \n else: #not valid command.\n socket.send(\"invalid command\")\n \n socket.close() \n print(\"closed connection\") #means the thread is also quitting", "def start(self):\n\t\twhile True:\n\t\t\tmensaje_servidor = \">>SERVIDOR:\"\n\t\t\tself.s.listen()\n\t\t\tprint(mensaje_servidor + \"ESPERANDO POR CLIENTES CTRL-C PARA TERMINAR EJECUCIÓN\")\n\t\t\tself.CONEXION, direccion = self.s.accept()\n\t\t\tprint(mensaje_servidor + \"CONEXIÓN RECIBIDA DE \" + str(direccion))\n\t\t\twhile True:\n\t\t\t\tmensaje_cliente = self.recibir_mensaje()\n\t\t\t\tif mensaje_cliente.startswith(\"download\"):\n\t\t\t\t\tself.enviar_mensaje(self.hacer_lista())\n\t\t\t\t\tnombre_archivo = self.recibir_mensaje().split(\":\")[1]\n\t\t\t\t\tself.enviar_archivo(nombre_archivo)\n\t\t\t\telif mensaje_cliente.startswith(\"load\"):\n\t\t\t\t\tnombre_archivo = mensaje_cliente.split(\":\")[1]\n\t\t\t\t\tself.recibir_archivo(nombre_archivo)\n\t\t\t\telif mensaje_cliente.startswith(\"list:\"):\n\t\t\t\t\tself.enviar_mensaje(self.hacer_lista())\n\t\t\t\telif mensaje_cliente == \"stop\":\n\t\t\t\t\tbreak", "def handleClient(self, connection, address):\r\n # time.sleep(5) #server Action\r\n while True:\r\n try:\r\n data = connection.recv(1024).decode(\"utf-8\")\r\n except:\r\n print('client disconnect: ', address, 'at', self.now())\r\n data = \"\"\r\n\r\n if not data: break\r\n\r\n data = self.change_host(data, address)\r\n result = self.manag_bd.dispatcher(data)\r\n\r\n mutex = thread.allocate_lock()\r\n\r\n\r\n if type(result)==type(list()):\r\n mutex.acquire() #Lock interrupt\r\n l = len(result)\r\n reply = str(l)\r\n connection.send(reply.encode(\"utf-8\"))\r\n for line in result:\r\n time.sleep(0.0025)\r\n reply = line\r\n connection.send(reply.encode(\"utf-8\"))\r\n mutex.release()# permission to interrupt\r\n else:\r\n reply = str(self.now())\r\n connection.send(reply.encode(\"utf-8\"))\r\n\r\n\r\n\r\n connection.close()" ]
[ "0.7136164", "0.66953665", "0.6398843", "0.6384446", "0.63746595", "0.6355251", "0.6276334", "0.62578547", "0.6232429", "0.6153676", "0.61311704", "0.60923487", "0.60865486", "0.6026433", "0.59538", "0.5909406", "0.5902352", "0.58999", "0.58918047", "0.58892024", "0.58742887", "0.5871203", "0.5865603", "0.5857102", "0.5857042", "0.5853978", "0.5846204", "0.5843299", "0.5805365", "0.58023816" ]
0.6803808
1
Get list of names of accessible repositories (including owner)
def list_repositories(self):
    data = self._get_all_data('/user/repos')
    return [repo['full_name'] for repo in data]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def query_repos(self):\n return [self.config[\"repo\"]]", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)", "def list_repositories(self):\n repos = self.repo_conn.list_repositories()\n return repos", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def get_repos():\n\n return __do_get_repos()", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories", "def list_ambari_managed_repos(stack_name):\n stack_name = stack_name.upper()\n # TODO : get it dynamically from the server\n repository_names = [stack_name, stack_name + \"-UTILS\" ]\n if OSCheck.is_ubuntu_family():\n repo_dir = '/etc/apt/sources.list.d/'\n elif OSCheck.is_redhat_family(): # Centos/RHEL 5/6\n repo_dir = '/etc/yum.repos.d/'\n elif OSCheck.is_suse_family():\n repo_dir = '/etc/zypp/repos.d/'\n else:\n raise Fail('Can not dermine repo dir')\n repos = []\n for name in repository_names:\n # List all files that match pattern\n files = glob.glob(os.path.join(repo_dir, name) + '*')\n for f in files:\n filename = 
os.path.basename(f)\n # leave out extension\n reponame = os.path.splitext(filename)[0]\n repos.append(reponame)\n # get uniq strings\n seen = set()\n uniq = [s for s in repos if not (s in seen or seen.add(s))]\n return uniq", "def list_ecr_repositories():\n repositories = ECS_MANAGER.list_ecr_repositories()\n\n if repositories:\n print(str_sep)\n print(\"Listing repositories available in {}\".format(SESSION.region_name.upper()))\n print(\"{:30}{:60}\".format('NAME', 'URI'))\n print(str_sep)\n\n for rep in repositories['repositories']:\n print(\"{:30}{:60}\".format(rep['repositoryName'], rep['repositoryUri']))", "def get_repositories(self):\n if not self.parentpath or not os.path.exists(self.parentpath):\n return []\n repos = os.listdir(self.parentpath)\n reponames = {}\n for name in repos:\n dir = os.path.join(self.parentpath, name)\n \n command = self.admin + ' verify \"%s\"' % dir\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (result, error) = process.communicate()\n \n rev = result[result.rfind('revision') + 9:len(result) - 2]\n displayrev = rev\n if rev == '0':\n rev = ''\n displayrev = ''\n reponames[name] = {\n 'dir': dir,\n 'rev': rev,\n 'display_rev': displayrev\n }\n return reponames.iteritems()", "def get_known_repos() -> List[str]:\n return [db.name for db in PacmanConfig(conf=\"/etc/pacman.conf\").initialize_alpm().get_syncdbs()]", "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def repos():\n print(\"\\nThe following repos are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def n_public_repos(gh, user):\n return getuser(gh, user).public_repos", "def query_repositories():\n return buildapi.query_repositories()", "def get_registries():\n url = \"/\".join([REGISTRY_BASE, \"_catalog\"])\n response = req(url)\n if response is not None:\n return response[\"repositories\"]\n return []", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "def get_repos(self):\n return requests.get(\"https://api.github.com/user/repos\",\n headers=self.headers).json", "def repos(self):\r\n return repositories.Repos(self)", "def repositories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationServiceGitRepositoryArgs']]]]:\n return pulumi.get(self, \"repositories\")", "def get_repos(self):\n\n if self.url == 'test':\n 
repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def _get_repo_contributors(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}/contributors\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def addons_repositories(self) -> list[str]:\n return self._data[ATTR_ADDONS_CUSTOM_LIST]" ]
[ "0.7608418", "0.7267118", "0.7101556", "0.70416945", "0.6976196", "0.69618136", "0.6874849", "0.6862077", "0.68582284", "0.68060064", "0.6804673", "0.6765574", "0.67367995", "0.67278445", "0.67108905", "0.66979", "0.6602462", "0.6571694", "0.65691054", "0.6552383", "0.6520411", "0.6479025", "0.64617926", "0.64311785", "0.64091647", "0.6400867", "0.6396747", "0.63924646", "0.6376847", "0.6374998" ]
0.78467643
0
Get dict of labels with colors for given repository slug
def list_labels(self, repository):
    data = self._get_all_data('/repos/{}/labels'.format(repository))
    return {l['name']: str(l['color']) for l in data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors", "def make_labels(painting):\n labels = {}\n for dcTitleLang, dcTitle in \\\n painting['object']['proxies'][0]['dcTitle'].iteritems():\n labels[dcTitleLang] = {'language': dcTitleLang, 'value': dcTitle[0]}\n return labels", "def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'", "def list_labels(service, repo):\n app = App()\n if repo:\n serv = app.get_service(service, repo=repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n print(tabulate([\n (\n label.name,\n label.color,\n label.description\n )\n for label in repo_labels\n ], tablefmt=\"fancy_grid\"))", "def assign_colour_label_data(catl):\n\n logmstar_arr = catl.logmstar.values\n u_r_arr = catl.modelu_rcorr.values\n\n colour_label_arr = np.empty(len(catl), dtype='str')\n for idx, value in enumerate(logmstar_arr):\n\n # Divisions taken from Moffett et al. 
2015 equation 1\n if value <= 9.1:\n if u_r_arr[idx] > 1.457:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value > 9.1 and value < 10.1:\n divider = 0.24 * value - 0.7\n if u_r_arr[idx] > divider:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value >= 10.1:\n if u_r_arr[idx] > 1.7:\n colour_label = 'R'\n else:\n colour_label = 'B'\n \n colour_label_arr[idx] = colour_label\n \n catl['colour_label'] = colour_label_arr\n\n return catl", "def map_label_colors(array, ignore_vals=[0]):\n colset = [(166, 206, 227),\n (31, 120, 180),\n (178, 223, 138),\n (51, 160, 44),\n (251, 154, 153),\n (227, 26, 28),\n (253, 191, 111),\n (255, 127, 0),\n (202, 178, 214),\n (106, 61, 154),\n (255, 255, 153),\n (177, 89, 40)]\n levels = np.unique(array)\n levels = [l for l in levels if l not in ignore_vals]\n if len(levels) == 0:\n return\n if len(levels) == 1:\n return({levels[0]: colset[0]})\n step = len(colset) / (len(levels) - 1)\n\n col_idx = np.arange(0, len(colset), step)\n colors = {}\n for idx in range(len(levels)):\n colors[levels[idx]] = colset[col_idx[idx]]\n return colors", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def milestone_labels(argv=None):\n argv = argv or sys.argv[1:]\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('color', help='color to make the labels')\n\n args = parser.parse_args(argv)\n\n session = GithubSession()\n\n labels = session.get_labels()\n\n labels_by_name = dict([(label['name'], label) for label in labels])\n\n for milestone in session.get_milestones():\n label_name = f'epic:{milestone[\"title\"]}'\n\n if label_name in labels_by_name:\n continue\n\n labels_by_name[label_name] = session.create_label(label_name, args.color)\n\n return labels_by_name", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def get_node_color(node_label):\n for NODE_KEY in list(NODE_TYPES.keys()):\n if node_label in NODE_TYPES[NODE_KEY]:\n return NODE_COLOR_DICT[NODE_KEY]\n try:\n x = int(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n x = float(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n node_label = node_label.replace(\"\\'\", \"\\\"\")\n tree = json.loads(node_label)\n for key in tree.keys():\n if key not in NODE_TYPES['Learner Params']:\n return NODE_COLOR_DICT['Uncategorized']\n else:\n try:\n x = int(tree[key])\n except:\n try:\n x = float(tree[key])\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Learner Params']\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Uncategorized']", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 
255).numpy().astype(\"uint8\")\n return colors", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def labels(self):\r\n return labels.RepoLabels(self)", "def ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)", "def compute_color_for_labels(label):\n\tcolor = [int((p * (label**2 - label + 1)) % 255) for p in palette]\n\treturn tuple(color)", "def _create_color_lot(color_names, color_subnames, color_dict_rgb):\n lot = {}\n i = 0\n for sn in np.arange(len(color_subnames)):\n for n in np.arange(len(color_names)):\n lot[i] = color_dict_rgb[color_names[n]][color_subnames[sn]]\n i += 1\n\n return lot", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def colors(self) -> dict:\n raise NotImplementedError", "def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. 
\"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel", "def get_value( self, trans, grid, repository ):\n repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )\n heads = hg_util.get_repository_heads( repo )\n multiple_heads = len( heads ) > 1\n if multiple_heads:\n heads_str = '<font color=\"red\">'\n else:\n heads_str = ''\n for ctx in heads:\n heads_str += '%s<br/>' % hg_util.get_revision_label_from_ctx( ctx, include_date=True )\n heads_str.rstrip( '<br/>' )\n if multiple_heads:\n heads_str += '</font>'\n return heads_str", "def _build_label(self):\n counter = Counter()\n _, labels = self.read_json()\n counter.update(labels)\n dictionary = dict()\n for i, word in enumerate(counter.most_common()):\n dictionary[word[0]] = i\n return dictionary", "def color(self, label):\n if self.grayscale:\n return (\"#ffffff\", \"#555555\", \"#888888\", \"#bbbbbb\", \"#222222\")[label]\n # COC WL WR SL SR\n return (\"#4e73b0\", \"#fdb863\", \"#b2abd2\", \"#e66101\", \"#5e3c99\")[label]", "def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap" ]
[ "0.65194696", "0.5914808", "0.5822218", "0.5784747", "0.57322896", "0.5691123", "0.5686196", "0.5633396", "0.56304544", "0.56205434", "0.55804044", "0.5575821", "0.5575821", "0.5575821", "0.5575821", "0.5560394", "0.55267113", "0.55267113", "0.5519801", "0.5494467", "0.54919934", "0.5491675", "0.5484689", "0.54767305", "0.5471285", "0.54246074", "0.54210824", "0.5395026", "0.53919697", "0.5371324" ]
0.7589527
0
Create new label in given repository
def create_label(self, repository, name, color, **kwargs):
    data = {'name': name, 'color': color}
    response = self.session.post(
        '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository),
        json=data
    )
    if response.status_code != 201:
        raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_label(self, org, name):\n pass", "def test_issue_create_label(self):\n pass", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = TRIVIAL_STATUS\n await _post_status(event, gh, status)", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def test_issue_add_label(self):\n pass", "def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name", "def new_label(self, context, payload):\n\n labels = GmailActions.labels(context)['labels']\n label_id = \"\"\n\n for label in labels:\n if label['name'] == payload['name']:\n label_id = label['id']\n break\n\n access_token = util.get_access_token(context['headers'])\n url = util.get_url(context) + f\"labels/{label_id}\"\n response = util.rest(\"GET\", url, access_token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text)", "def label(self, name):\r\n return labels.RepoLabel(self, name)", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def add_label(self, new_name, status):\n api_uri = self._uri_dict.get('addLabel')\n data = {\n 'newName': new_name,\n 'status': status\n }\n r_data = self._post(api_uri, data)\n return r_data", "def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])", "def add_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--add-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def label_new(request):\n if request.method == 'POST':\n form = NewLabelForm(request.POST)\n\n if form.is_valid():\n label = form.save()\n messages.success(request, 'Label successfully created.')\n return HttpResponseRedirect(reverse('label_main', args=[label.id]))\n else:\n 
messages.error(request, 'Please correct the errors below.')\n else:\n form = NewLabelForm()\n\n return render_to_response('annotations/label_new.html', {\n 'form': form,\n },\n context_instance=RequestContext(request)\n )", "def create_label(project_id: int, label_name: str, templates: list, session=konfuzio_session(), **kwargs) -> List[dict]:\n url = get_create_label_url()\n templates_ids = [template.id for template in templates]\n\n description = kwargs.get('description', None)\n has_multiple_top_candidates = kwargs.get('has_multiple_top_candidates', False)\n data_type = kwargs.get('data_type', 'Text')\n\n data = {\"project\": project_id,\n \"text\": label_name,\n \"description\": description,\n \"has_multiple_top_candidates\": has_multiple_top_candidates,\n \"get_data_type_display\": data_type,\n \"templates\": templates_ids\n }\n\n r = session.post(url=url, json=data)\n\n assert r.status_code == requests.codes.created, f'Status of request: {r}'\n label_id = r.json()['id']\n return label_id", "def push_to_github(label):\n\n # Make sure we're in the right place to do all the git things.\n os.chdir(taas.data_root())\n\n # If there's nothing to do, then do nothing.\n if (not something_to_commit()):\n print(\"Nothing to commit.\")\n return\n\n branch_name = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n branch_name += \"-\" + label\n\n run([\"git\", \"checkout\", \"-b\", branch_name])\n\n run([\"git\", \"add\", \"-A\"])\n\n run([\"git\", \"status\"])\n\n run([\"git\", \"commit\", \"-m\", \"Automated update: \"+label])\n\n run([\"git\", \"push\", \"--set-upstream\", \"origin\", branch_name])", "def test_0010_create_repository(self):\n category = self.create_category(name=category_name, description=category_description)\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.get_or_create_repository(name=repository_name,\n description=repository_description,\n long_description=repository_long_description,\n owner=common.test_user_1_name,\n category_id=self.security.encode_id(category.id),\n strings_displayed=[])", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def addLabel(*args):", "def createLabel(self, address: ghidra.program.model.address.Address, name: unicode, namespace: ghidra.program.model.symbol.Namespace, makePrimary: bool, sourceType: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.symbol.Symbol:\n ...", "def __call__(self, *args, **kwargs) -> L:\n label = self._label_adapter.create_label(*args,\n document=self._document,\n **kwargs)\n self._current_labels.append(label)\n return label", "def command_new_repo(self):\n repoinit.new_repo(*self.args())", "def post_label():\n label_id = dao.set_label(id=str(uuid.uuid4()),\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def update_label(self, repository, name, color, old_name=None, **kwargs):\n data = {'name': name, 'color': color}\n response = self.session.patch(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, old_name or name\n ),\n json=data\n )\n if response.status_code != 200:\n raise GitHubError(response)", "def create_label(self, name):\n payload = self._build_params(name=name)\n return Label.deserialize(self._post('labels', None, payload))", "def _create_label(self, label: 
str, ent_id: Union[str, None]) -> str:\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label", "def add_label(self, label):\n status = self.ocp.add_label(resource_name=self.name, label=label)\n self.reload()\n return status", "def insert_new_label(self, label, index, nvals):\n if label in self.labels: return\n self.labels.append(label)\n self.parents.append(self.find_parent_label(label))\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0", "def put_label(id):\n label_id = dao.set_label(id=id,\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)" ]
[ "0.7274326", "0.7140176", "0.67611367", "0.6694404", "0.6539713", "0.6458034", "0.64482284", "0.6406368", "0.63728607", "0.6343112", "0.6218493", "0.6180373", "0.6086001", "0.6058881", "0.60423875", "0.6024324", "0.6002186", "0.5988165", "0.5969681", "0.59262604", "0.5906312", "0.5893694", "0.5886118", "0.5861668", "0.5831134", "0.5828109", "0.580204", "0.57648677", "0.57189536", "0.5704992" ]
0.77359784
0
Update existing label in given repository
def update_label(self, repository, name, color, old_name=None, **kwargs): data = {'name': name, 'color': color} response = self.session.patch( '{}/repos/{}/labels/{}'.format( self.GH_API_ENDPOINT, repository, old_name or name ), json=data ) if response.status_code != 200: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def UpdateLabel(self) -> _n_6_t_0:", "def updatelabel(task, label, eid):\n ServerManager.get()\n result = ServerManager.api.update_property(task, eid, prop='label', value=label)\n if result.response_type == 'success':\n click.echo(click.style(result.message, fg='green'))\n else:\n click.echo(click.style(result.message, fg='red'))", "def update_labels(source_repo, service, source_service, destination):\n app = App()\n if source_repo:\n serv = app.get_service(source_service, repo=source_repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n\n for repo_for_copy in destination:\n other_serv = app.get_service(service, repo=repo_for_copy)\n changes = other_serv.update_labels(labels=repo_labels)\n\n click.echo(\"{changes} labels of {labels_count} copied to {repo_name}\".format(\n changes=changes,\n labels_count=len(repo_labels),\n repo_name=repo_for_copy\n ))", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def rdf_update_labels(rdf, node):\n final_list = []\n for i in node.get_labels():\n # print(i)\n final_list += rdf_get_branch(rdf, i)\n for i in final_list:\n node.add_label(i)", "def change_issues_label(self, msg, old_label, new_label):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n client = self._github_operator(msg)\n cmd = \"repo:{} label:{} is:open type:issue\".format(\n task_repository_name(), old_label)\n issue_list = client.search_issue(cmd, 10)\n for issue in issue_list:\n trans.wait_for_limit(MAX_RESULT, MAX_RESULT)\n issue.remove_from_labels(old_label)\n issue.add_to_labels(new_label)\n yield \"{} issues has been changed label from {} to {}\".format(len(issue_list), old_label, new_label)", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def label(self, name):\r\n return labels.RepoLabel(self, name)", "def update_label(self, uuid, name):\n payload = self._build_params(uuid=uuid, name=name)\n return Label.deserialize(self._post('labels', None, payload))", "def update_from_repo():\n\treturn", "def update(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def put(self, id):\n context = request.environ.get('context')\n resp = dbapi.netdevices_labels_update(context, id, request.json)\n response = {\"labels\": list(resp.labels)}\n return response, 200, None", "def _update_label(self, outer_pos, inner_pos, 
new_label):\n r, c = outer_pos\n ir, ic = inner_pos\n self.inner_boards[r][c][ir][ic][\"text\"] = new_label", "def delete_label(self, repository, name, **kwargs):\n response = self.session.delete(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, name\n )\n )\n if response.status_code != 204:\n raise GitHubError(response)", "def put_label(id):\n label_id = dao.set_label(id=id,\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def push_to_github(label):\n\n # Make sure we're in the right place to do all the git things.\n os.chdir(taas.data_root())\n\n # If there's nothing to do, then do nothing.\n if (not something_to_commit()):\n print(\"Nothing to commit.\")\n return\n\n branch_name = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n branch_name += \"-\" + label\n\n run([\"git\", \"checkout\", \"-b\", branch_name])\n\n run([\"git\", \"add\", \"-A\"])\n\n run([\"git\", \"status\"])\n\n run([\"git\", \"commit\", \"-m\", \"Automated update: \"+label])\n\n run([\"git\", \"push\", \"--set-upstream\", \"origin\", branch_name])", "def update_label(label1, label2, idx):\n for i in range(0, len(idx)):\n label1[i] = label2[idx[i]]\n return label1", "def label_experiment(self, exp_id):\n exp = experiment.experiment(new_experiment=False, ts=str(exp_id))\n label = request.form.get('label')\n exp.update_metadata(change_label=True, label=label)\n\n return \"OK\"", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = TRIVIAL_STATUS\n await _post_status(event, gh, status)", "def create_label(self, repository, name, color, **kwargs):\n data = {'name': name, 'color': color}\n response = self.session.post(\n '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository),\n json=data\n )\n if response.status_code != 201:\n raise GitHubError(response)", "def add_label(self, label):\n status = self.ocp.add_label(resource_name=self.name, label=label)\n self.reload()\n return status", "def update_label(UniqueLabel, Label):\n\n UniqueLabel['Confidence'] = ((UniqueLabel['Confidence'] * UniqueLabel['Count']) + Label['Label']['Confidence'])/(UniqueLabel['Count'] + 1)\n UniqueLabel['TimeStamps'].append(Label['Timestamp'])\n UniqueLabel['Count'] += 1\n\t\n return", "def test_issue_replace_labels(self):\n pass", "def update_code_repository(CodeRepositoryName=None, GitConfig=None):\n pass", "def _UpdateLabels(self, args, migration_job, update_fields):\n add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)\n remove_labels = labels_util.GetRemoveLabelsListFromArgs(args)\n value_type = self.messages.MigrationJob.LabelsValue\n update_result = labels_util.Diff(\n additions=add_labels,\n subtractions=remove_labels,\n clear=args.clear_labels\n ).Apply(value_type)\n if update_result.needs_update:\n migration_job.labels = update_result.labels\n update_fields.append('labels')", "def update_labels(self,label_dict):\n \t\tfor key in self.deps:\n \t\t\tfor dependent in self.deps[key]:\n \t\t\t\tlabel = dependent[1]\n \t\t\t\tlabel_dict[label] = label_dict.get(label,0) + 1\n \t\treturn label_dict", "def changeLabel(nuclideBase, newLabel):\n nuclideBase.label = newLabel\n byLabel[newLabel] = nuclideBase", "def test_labels_change(self):\n label_new = factories.LabelFactory(name=\"test_label\",\n 
object_type='Assessment')\n response = self.api.put(self.assessment, {'labels': [{\n \"name\": label_new.name,\n \"id\": label_new.id\n }]})\n self.assert200(response)\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"[email protected]\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"LABELS\"],\n (\"test_label\", \"\")\n )", "def add_label(self, new_name, status):\n api_uri = self._uri_dict.get('addLabel')\n data = {\n 'newName': new_name,\n 'status': status\n }\n r_data = self._post(api_uri, data)\n return r_data" ]
[ "0.6681116", "0.6653021", "0.64337254", "0.6265893", "0.62407804", "0.6226869", "0.61520106", "0.6127353", "0.61121166", "0.61037016", "0.6094828", "0.603661", "0.60320926", "0.59240484", "0.59008676", "0.58834106", "0.58732027", "0.5860932", "0.5839947", "0.58363944", "0.5802565", "0.5790831", "0.57733464", "0.5766619", "0.5702953", "0.57003826", "0.5630814", "0.5615852", "0.55864763", "0.5586208" ]
0.72466385
0
Delete existing label in given repository
def delete_label(self, repository, name, **kwargs): response = self.session.delete( '{}/repos/{}/labels/{}'.format( self.GH_API_ENDPOINT, repository, name ) ) if response.status_code != 204: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_delete_label(self):\n pass", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def repository_delete(ctx: click.Context, repository_name):\n subcommand_repository.cmd_delete(ctx.obj, repository_name)", "def delete_label(id):\n dao.delete_label(id)\n return jsonify(dao.get_label(id))", "def delete_issue_label(repo, project, issue_number, delete_label_id):\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n current_label_ids = dao.get_issue_label_ids(issue_path)\n\n revised_label_ids = [label_id for label_id in current_label_ids\n if label_id != delete_label_id]\n\n dao.set_issue_label_ids(issue_path, revised_label_ids)\n\n return if_found(dao.get_issue_labels(issue_path))", "def test_issue_remove_label(self):\n pass", "def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')", "def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)", "def delete_code_repository(CodeRepositoryName=None):\n pass", "def delete(self, label):\n if label in self.bindings:\n if not self.locked:\n i = self.bindings[label]\n del self.bindings[label]\n return i\n else:\n if self.parent:\n return self.parent.delete(label)\n else:\n raise SnekEvaluationError('attempting to delete non-existing name {}'.format(label))", "async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)", "def delete_manifest_label(label_uuid, tag_manifest):\n\n # Find the label itself.\n label = get_manifest_label(label_uuid, tag_manifest)\n if label is None:\n return None\n\n if not label.source_type.mutable:\n raise DataModelException(\"Cannot delete immutable label\")\n\n # Delete the mapping records and label.\n (TagManifestLabelMap.delete().where(TagManifestLabelMap.label == label).execute())\n\n deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n label.delete_instance(recursive=False)\n return label", "def remove(self, label):\n\n\t\t\tself[label].remove()", "def _del_label(self):\n label = self.combobox.currentText()\n if label:\n button = QMessageBox.warning(self, \"Delete label\", \n \"Are you sure that you want to delete label %s ?\" % label,\n QMessageBox.Yes,\n QMessageBox.No)\n if button == QMessageBox.Yes:\n self._label_config.remove_label(str(label))\n self._update_combobox()", "def delete_label(self, label_key):\n # type: (str) -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response_result = self.connection.api_call(\n \"DELETE\",\n [\"v1\", \"datasets\", self.dataset_id, \"resources\", self.id, \"labels\", label_key],\n headers=headers,\n )\n\n if response_result:\n # Sync the latest data from API to prevent inconsistency\n self.refresh()\n\n return True", "def remove_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--remove-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n 
self.category.remove(category)", "def RemoveLabel(self, label):\n if self.labels is None:\n self.labels = set()\n else:\n try:\n self.labels.remove(label)\n except KeyError:\n pass", "def delete_metering_label(self, label):\r\n return self.delete(self.metering_label_path % (label))", "def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )", "def test_delete_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.DeleteMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n args = [myid]\r\n self._test_delete_resource(resource, cmd, myid, args)", "def _simple_deletion(self, operation, labels):\r\n label_strings = []\r\n for label in labels:\r\n if inspect.isclass(label) and issubclass(label, Edge):\r\n label_string = label.get_label()\r\n elif isinstance(label, Edge):\r\n label_string = label.get_label()\r\n label_strings.append(label_string)\r\n\r\n return self._delete_related(operation, label_strings)", "def __delitem__(self, doc_label):\n if doc_label not in self.docs:\n raise KeyError('document `%s` not found in corpus' % doc_label)\n del self.docs[doc_label]", "def remove_label(self, key: str):\n del self.labels[key]", "def delete(connection, rid=None, repo=None):\n\n if repo is None:\n repo = Repository(connection, rid)\n\n return repo.delete()", "def delete(self):\n\n lod_history = self.repo._get_lod_history(self.lod)\n assert lod_history.exists()\n lod_history.update(self.repo._youngest, None)\n self._mark_deleted()", "def test_heads_delitem_pass(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n del heads[\"branch\"]\n assert \"branch\" not in heads", "def __gitDeleteBranch(self):\n self.vcs.gitDeleteRemoteBranch(self.project.getProjectPath())", "def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)", "def fusion_api_delete_repository(self, uri, api=None, headers=None):\n return self.repository.delete(uri=uri, api=api, headers=headers)" ]
[ "0.7348744", "0.716896", "0.7127642", "0.6931051", "0.6877669", "0.67660433", "0.67263836", "0.67033213", "0.6610836", "0.6601114", "0.6599512", "0.64673704", "0.64669335", "0.64562386", "0.6455143", "0.63759565", "0.6242319", "0.6198236", "0.61135364", "0.60569805", "0.6021209", "0.59816724", "0.5958259", "0.5939189", "0.5937705", "0.5898544", "0.5885807", "0.5855245", "0.5850653", "0.58377445" ]
0.8369633
0
Extracts feature vectors from a given model and dataset and writes them, along with labels, to a file. This function works for any model whose forward() method returns, on any given input x, the pair (prediction on x, feature vector for x) and more generally, any model whose second return value is a feature vector.
def extract_feature_vectors(model, data_loader, parameters, features_file_path): feature_vectors, label_vectors = [], [] # Set model to evaluation mode model.eval() # Show progress bar while iterating over mini-batches with tqdm(total=len(data_loader)) as progress_bar: for i, (X_batch, Y_batch) in enumerate(data_loader): # Dimensions of the input Tensor batch_size, channels, height, width = X_batch.size() # If GPU available, enable CUDA on data if parameters.cuda: X_batch = X_batch.cuda() Y_batch = Y_batch.cuda() # Wrap the input tensor in a Torch Variable X_batch_variable = Variable(X_batch, volatile=True) # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features Y_predicted, features = model(X_batch_variable) # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to # CPU, and convert it to a NumPy array features_numpy = features.data.cpu().numpy() # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array Y_numpy = Y_batch.cpu().numpy() # For each example in the batch, record its features and labels for j in range(batch_size): feature_vectors.append(features_numpy[j,:]) label_vectors.append(Y_numpy[j,:]) progress_bar.update() utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(feat_file_name, label_file_name), color.CYAN)\n\n #Save feature vector to disk\n with open(feat_file_name, 'w') as f:\n pickle.dump(feat_vec, f)\n #Save label file\n with open(label_file_name, 'w') as f:\n pickle.dump(labels, f)", "def writeFeatures(features, labels, output_filename):\n\twith open(output_filename, 'w') as csvfile:\n\t fieldnames = features[0].keys()\n\t fieldnames.append('label')\n\t writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t writer.writeheader()\n\t for i in range(len(features)):\n\t \tfeatures[i]['label'] = labels[i]\n\t \twriter.writerow(features[i])\n\n\treturn", "def write_svm_features(clf, vectorizer, round=1, filename=\"features\"):\n\n f = open(\"%s-round%d.txt\" % (filename, round), \"w\")\n weight_feature_pairs = zip(clf.coef_.tolist()[0], vectorizer.feature_names_)\n weight_feature_pairs.sort(key=lambda x:abs(x[0]), reverse=True)\n for weight, word in weight_feature_pairs:\n f.write(\"%s\\t%g\\n\" % (word, weight))\n f.close()", "def write_model_results(model, input_file, repr, tags, outpath):\n input, input_data = read_input(input_file)\n\n if repr == \"c\":\n x = utils.get_features(input, ixs=3)\n else:\n x = utils.get_features(input, chars=True)\n\n w_batcher = utils.AutoBatcher(x, x, batch_size=1, shuffle=False)\n labels = []\n for inputs, _ in w_batcher.get_batches():\n output = torch.max(model(inputs), 1)[1]\n labels += output.cpu().data.numpy().tolist()\n\n predictions = utils.NEWLINE.join([\"{} {}\".format(input_data[i], tags[labels[i]])\\\n for i in range(len(input_data))])\n with open(outpath, \"w\") as outfile:\n outfile.write(predictions)", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def write_feature_labels(output, feature_labels):\n with open(os.path.join(output, 'features.list'), 'w') as out_file:\n out_file.write('\\n'.join(feature_labels))", "def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_features(model, config_dict, steps, dataset):\n if config_dict['inference_only']:\n model.load_weights(config_dict['checkpoint']).expect_partial()\n\n # @tf.function\n def get_features_step(x):\n predictions, features = model(x, training=False)\n # Downsample further with one MP layer, strides and kernel 2x2\n # The result per frame is 4x4x32.\n # features = tf.keras.layers.TimeDistributed(\n # tf.keras.layers.MaxPool2D())(features)\n # # The result per frame is 1x32.\n # features = tf.keras.layers.TimeDistributed(\n # tf.keras.layers.GlobalAveragePooling2D())(features)\n features = 
tf.keras.layers.Flatten()(features)\n return predictions, features\n\n features_to_save = []\n\n with tqdm(total=steps) as pbar:\n for step, sample in enumerate(dataset):\n if step > steps:\n break\n pbar.update(1)\n to_save_dict = {}\n x_batch_train, y_batch_train, paths = sample\n preds, flow_rgb_map_merge = get_features_step(x_batch_train)\n to_save_dict['paths'] = paths\n to_save_dict['preds'] = preds\n to_save_dict['features'] = flow_rgb_map_merge\n to_save_dict['y'] = y_batch_train\n features_to_save.append(to_save_dict)\n features_to_save = np.asarray(features_to_save)\n np.savez_compressed(config_dict['checkpoint'][:18] + '_saved_features_20480dims', features_to_save)", "def transform_word_vectors(self):\n print('Transforming word vectors')\n \n self.train_X_tfidfvec = self.get_word_vectors(self.train_X)\n self.val_X_tfidfvec = self.get_word_vectors(self.val_X)\n self.test_X_tfidfvec = self.get_word_vectors(self.test_X)\n if self.savename is not None:\n with open(self.savename + '_X_tfidfvec.obj','wb') as f:\n pickle.dump((self.train_X_tfidfvec,self.val_X_tfidfvec,self.test_X_tfidfvec),f) \n print('Done transforming word vectors')", "def save_features_to_file(path: str, features: Data_dict_type, labels: Labels_dict_type_numpy):\n for key, item in features.items():\n filename = key\n values, sample_rate = item\n window_labels = labels[filename].reshape((-1, 1))\n concatenated_data = np.concatenate(\n [np.array([i for i in range(values.shape[0])])[..., np.newaxis], # window_idx\n values, # features\n window_labels], axis=-1) # labels\n df_to_save = pd.DataFrame(data=concatenated_data)\n columns = ['window_idx'] + ['feature_%i' % i for i in range(values.shape[-1])] + ['label']\n df_to_save.columns = columns\n df_to_save.to_csv(os.path.join(path, filename.split('.')[0] + '.csv'), index=False)", "def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)", "def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, PARAM_EXTENSION)\n with open(filename, 'w') as f:\n pickle.dump(data, f)", "def generate_and_save_train_features(train_input, train_output, bag_of_words, tfidf):\n df_train = get_df(train_input)\n train_words = np.array(df_train.text.str.lower().values)\n\n bag_of_words.fit(train_words)\n\n train_words_binary_matrix = bag_of_words.transform(train_words)\n feature_names = bag_of_words.get_feature_names_out()\n\n tfidf.fit(train_words_binary_matrix)\n train_words_tfidf_matrix = tfidf.transform(train_words_binary_matrix)\n\n save_matrix(df_train, train_words_tfidf_matrix, feature_names, train_output)", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = 
model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def dump_vecs():\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n \n with open(v_file, 'wb') as f:\n pickle.dump(VECTORIZER, f)\n with open(d_file, 'wb') as f:\n pickle.dump(CECTORIZER, f)", "def cross_validation(feature_train, help_rank_train, model_name):\n clf = svm.SVC(kernel='linear', C=1).fit(feature_train, help_rank_train)\n clf_model = open(model_name,'wb')\n dump(clf, clf_model, -1)\n return", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def write_vecs(self, vecs_fname):\r\n header = f'{self.vectors.shape[0]} {self.vectors.shape[1]}'\r\n np.savetxt(vecs_fname, np.hstack([self.words.reshape(-1, 1), self.vectors]), fmt='%s', header=header)", "def walk_forward_cv(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n optimal_params_by_model = {}\r\n cv_metadata_by_model = {}\r\n cv_predictions_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.cv_params = 
self.cv_params\r\n svm.test_name = self.test_name\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.run_svm_cv()\r\n optimal_params_by_model['SVM'] = svm.svm_optimal_params\r\n cv_metadata_by_model['SVM'] = svm.metadata\r\n cv_predictions_by_model['SVM'] = svm.svm_cv_predictions\r\n \r\n self.optimal_params_by_output[output_name] = optimal_params_by_model\r\n self.cv_metadata_by_output[output_name] = cv_metadata_by_model\r\n self.cv_predictions_by_output[output_name] = cv_predictions_by_model", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def generate_and_save_test_features(test_input, test_output, bag_of_words, tfidf):\n df_test = get_df(test_input)\n test_words = np.array(df_test.text.str.lower().values)\n\n test_words_binary_matrix = bag_of_words.transform(test_words)\n test_words_tfidf_matrix = tfidf.transform(test_words_binary_matrix)\n feature_names = bag_of_words.get_feature_names_out()\n\n save_matrix(df_test, test_words_tfidf_matrix, feature_names, test_output)", "def save_models(\n output_path,\n asv_model,\n asv_preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n):\n asv_state_dict = asv_model.state_dict()\n # Add preprocessing data for Xvectors (if any)\n asv_state_dict.update(asv_preprocessing_parameters)\n torch.save(asv_state_dict, output_path + \"_asv_model\")\n\n # Use existing function to save CM model\n save_cm_model(\n cm_feature_network,\n cm_model,\n bonafide_cm_features,\n output_path + \"_cm_model\"\n )", "def get_model_output_and_feature(\n model,\n batch_x\n):\n outputs, features = model.get_output_and_feature(batch_x, training=False)\n return outputs, features", "def dump_slice_dataset(X: csr_matrix,\n y: csr_matrix,\n feat_file: Union[str, TextIOWrapper],\n label_file: Union[str, TextIOWrapper]) -> None:\n if isinstance(feat_file, str):\n feat_file = open(feat_file, 'w')\n elif 
isinstance(feat_file, TextIOWrapper):\n pass\n else:\n raise TypeError(f'feature_file is type {type(feat_file)} but should be either str or TextIOWrapper')\n\n if isinstance(label_file, str):\n label_file = open(label_file, 'w')\n elif isinstance(label_file, TextIOWrapper):\n pass\n else:\n raise TypeError(f'label_file is type {type(label_file)} but should be either str or TextIOWrapper')\n\n if X.shape[0] != y.shape[0]:\n raise Exception('X and y must have same shape')\n\n # 1. create sparse label file\n # format:\n # The first line of both the files contains the number of rows\n # the label file contains indices of active labels\n # and the corresponding value (always 1 in this case) starting from 0\n\n # write header\n label_header = f'{y.shape[0]} {y.shape[1]}\\n'\n label_file.write(label_header)\n # write data\n for label_vector in y:\n label_idx = label_vector.nonzero()[1]\n line = f'{\" \".join([f\"{label_id}:1\" for label_id in map(str, label_idx)])}\\n'\n label_file.write(line)\n\n label_file.close()\n\n # 2. create dense feature file\n # format:\n # The first line of both the files contains the number of rows\n # For features, each line contains D (the dimensionality of the feature vectors), space separated, float values\n\n # write header\n feature_header = f'{X.shape[0]} {X.shape[1]}\\n'\n feat_file.write(feature_header)\n # write data\n for feature_vector in X:\n line = f'{\" \".join(map(str, [i if i > 0.0 else int(0) for i in feature_vector[0].toarray().ravel()]))}\\n'\n feat_file.write(line)\n\n feat_file.close()\n\n return", "def save_to_arff(file_path, interactions, labels, selection,\n vectorizer=None, unlabelled=False, meka=True, use_bzip=True):\n if use_bzip:\n zipper = bz2\n else:\n zipper = gzip\n\n if vectorizer is None:\n vectorizer = CountVectorizer(lowercase=False, binary=True)\n\n X, y = interactions_to_Xy_format(interactions, selection)\n mlb = MultiLabelBinarizer(classes=sorted(labels), sparse_output=False)\n if not unlabelled:\n y = mlb.fit_transform(y)\n X = vectorizer.fit_transform(X)\n\n if meka:\n header = \"@relation 'PTMs: -C %d'\\n\\n\" % (len(labels))\n else:\n header = \"@relation PTMs\\n\\n\"\n\n for label in labels:\n header += \"@attribute %s {0,1}\\n\" % (label)\n for feature in (rename(x) for x in vectorizer.get_feature_names()):\n header += \"@attribute %s numeric\\n\" % (feature)\n\n header += \"\\n@data\\n\\n\"\n\n with zipper.open(file_path, 'wb') as fp:\n X = X.todense()\n if unlabelled:\n X = X.astype(str)\n y = y.astype(str)\n y[:, :] = '?'\n vec = np.hstack([y, X])\n np.savetxt(\n fp, X=vec, fmt='%s', delimiter=',', comments='', header=header\n )", "def gather_and_save_vectors(path, words_vec = collections.defaultdict(list), features = []):\n with open(path, 'rt', encoding='mac_roman') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='\"')\n for row in csvreader:\n words_vec, features = countize(row[3], row[2], words_vec, features)\n try:\n words_vec, features = countize(row[6], row[2], words_vec, features)\n except:\n pass\n pickle.dump(words_vec, open(\"ind_vectors.data\", \"wb\"))\n pickle.dump(features, open(\"i_features.data\", \"wb\"))\n return words_vec, features", "def save(self, model_out_file):\n\t\tvariables_dict = {v.name: v for v in tf.global_variables()}\n\t\tvalues_dict = self.sess.run(variables_dict)\n\t\tnp.savez(open(model_out_file, 'wb'), **values_dict)", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in 
self.sets:\n self.save_features_to_file_by_set(s)" ]
[ "0.6803554", "0.6240717", "0.6221068", "0.6063516", "0.6009722", "0.5950848", "0.5824239", "0.58020353", "0.57663727", "0.57563233", "0.5755955", "0.57133436", "0.5701105", "0.5675284", "0.56728137", "0.56672686", "0.56518734", "0.56423616", "0.56261265", "0.56195384", "0.56093127", "0.559749", "0.55943036", "0.5589769", "0.55883914", "0.55862075", "0.5547711", "0.5543332", "0.5540957", "0.5536413" ]
0.6868015
0
Returns the average distance between pairs of vectors in a given list of vectors.
def average_distance_between_vectors(vectors, distance): vectors = numpy.array(vectors) vectors = vectors - numpy.mean(vectors, axis=0) vectors = normalize(vectors) vectors = list(vectors) average_distance = utils.RunningAverage() for vector_1, vector_2 in itertools.combinations(vectors, r=2): # All pairs of vectors average_distance.update(distance(vector_1, vector_2)) return average_distance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_vectors(vectors_list):\n return np.mean(vectors_list, axis=0)", "def compute_average(vec_list):\r\n return np.sum(vec_list, axis = 0)/len(vec_list)", "def average(cls, vectors):\n return cls.sum(vectors) / len(vectors)", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len(vectors)\n\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1 / n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1 / n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def getAveragePositionFromList( positionsList ):\n \n vectors = [ vector.makeMVector( values = [x, y, z] ) for x, y, z in positionsList ]\n \n vectorsSum = vector.makeMVector()\n \n for v in vectors:\n \n vectorsSum += v\n \n vectorsAverage = vectorsSum / len( positionsList )\n \n return [ vectorsAverage[0], vectorsAverage[1], vectorsAverage[2] ]", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def average_distance(c1, c2):\n return sum(sum(symmetric_distances[p1][p2] for p1 in c1) for p2 in c2) \\\n / (len(c1) * len(c2))", "def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)", "def average_distance(predictions, targets):\n total_distance = 0\n for prediction, target in zip(predictions, targets):\n total_distance += Levenshtein.distance(prediction, target)\n return total_distance / len(predictions)", "def mse (vec1, vec2):\n sum = 0.0 #Initializes sum to 0\n count = len(vec1) #Number of total elements in each vector\n for i in range(count):\n sum += (vec2[i]-vec1[i])**2 #Adds the square of the difference between the values at each position in the two vectors\n return sum/count", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def pearson_distance(vector1, vector2) :\n sum1 = sum(vector1)\n sum2 = sum(vector2)\n\n sum1Sq = sum([pow(v,2) for v in vector1])\n sum2Sq = sum([pow(v,2) for v in vector2])\n\n pSum = sum([vector1[i] * vector2[i] for i in range(len(vector1))])\n\n num = pSum - (sum1*sum2/len(vector1))\n den = math.sqrt((sum1Sq - pow(sum1,2)/len(vector1)) * (sum2Sq - pow(sum2,2)/len(vector1)))\n\n if den == 0 : return 0.0\n return 1.0 - num/den", "def hellinger_dist(v1, v2):\n if len(v1) != len(v2):\n raise ValueError(\"Vectors should have the same size! 
\")\n return sqrt( sum( map(lambda e: \n (sqrt(e[0])-sqrt(e[1]))**2, zip(v1,v2))))/sqrt(2)", "def avg(vector):\n if len(vector) == 0:\n return 0\n return sum(vector) / len(vector)", "def euclidean_distance(list1, list2):\n # Make sure we're working with lists\n # Sorry, no other iterables are permitted\n assert isinstance(list1, list)\n assert isinstance(list2, list)\n\n dist = 0\n\n # 'zip' is a Python builtin, documented at\n # <http://www.python.org/doc/lib/built-in-funcs.html>\n for item1, item2 in zip(list1, list2):\n dist += (item2 - item1)**2\n return math.sqrt(dist)", "def euclidean_distance(a: Tuple[float, ...], b: Tuple[float, ...]) -> float:\n assert len(a) == len(b)\n return sqrt(sum(pow(x[0] - x[1], 2) for x in zip(a, b)))", "def dist(self, a, b, l):\n # works for non-arrays\n return sum( ((i-j)/k)**2 for i,j,k in zip(a, b, l) )", "def computeMeans(list_of_lists):\n # Find length of longest list\n longest = 0\n for lst in list_of_lists:\n if len(lst) > longest:\n longest = len(lst)\n # Get totals\n tots = [0]*(longest)\n for lst in list_of_lists:\n for i in range(longest):\n if i < len(lst):\n tots[i] += lst[i]\n else:\n tots[i] += lst[-1]\n # Convert tots to an array to make averaging across each index easier\n tots = pylab.array(tots)\n # Compute means\n means = tots/float(len(list_of_lists))\n return means", "def computeMeans(list_of_lists):\n # Find length of longest list\n longest = 0\n for lst in list_of_lists:\n if len(lst) > longest:\n longest = len(lst)\n # Get totals\n tots = [0]*(longest)\n for lst in list_of_lists:\n for i in range(longest):\n if i < len(lst):\n tots[i] += lst[i]\n else:\n tots[i] += lst[-1]\n # Convert tots to an array to make averaging across each index easier\n tots = pylab.array(tots)\n # Compute means\n means = tots/float(len(list_of_lists))\n return means", "def compare_vectors(v1, v2):\n if len(v1) == len(v2):\n distance = 0\n for i in xrange(len(v1)):\n distance += (v1[i] - v2[i]) ** 2\n return distance\n else:\n print \"vector not match in dimensions\"", "def add_vectorlist(vectors):\n x, y, z = zip(*vectors)\n return sum(x), sum(y), sum(z)", "def average_distance(l1, l2, distance_function=None):\n\n if not distance_function:\n distance_function = levenshtein_ratio\n counter = 0.0\n numerator = 0.0\n \n #compute array of values\n# if not l1 or not l2:\n# return 1.0\n #make l1 the shortes\n l1, l2 = len(l1)<len(l2) and (l1, l2) or (l2, l1)\n \n #compute the distrances\n distances = []\n for s1 in l1:\n distances += [(distance_function(s1, s2), s1, s2) for s2 in l2]\n# ls.sort(reverse=True)\n# distances.append((ls, s1))\n distances.sort(reverse=True)\n #compute maxima for each colum and each row\n done = set()\n for d, s1, s2 in distances:\n if s1 not in done and s2 not in done:\n done.add(s1)\n done.add(s2) \n counter += d\n numerator += 1\n #if there is a difference in length, we penalize for each item \n difference = len(l2) - len(l1)\n counter += .8 * difference\n numerator += difference\n if numerator == 0:\n return 1.0\n return counter/numerator", "def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results" ]
[ "0.7456902", "0.7266837", "0.7161427", "0.66369", "0.66369", "0.6613315", "0.6526983", "0.6526983", "0.652161", "0.652161", "0.652161", "0.6493121", "0.6426592", "0.641932", "0.61453825", "0.6135802", "0.6098236", "0.60969055", "0.60628074", "0.6040219", "0.6035585", "0.6013587", "0.5990824", "0.5973708", "0.5933027", "0.5933027", "0.58944213", "0.5887823", "0.58707106", "0.58558226" ]
0.8062336
0
Reads feature vectors and labels from a file and prints information about their clustering properties. Here, we think of the space of feature vectors, and consider a vector v_i to be in cluster j if j is one of the labels for example i.
def analyze_feature_vector_clusters(features_file_path, distance=utils.L2_distance): feature_vectors, label_vectors = utils.read_feature_and_label_vectors(features_file_path) logging.info('Building clusters...') # Map from (integer j) --> (list of indices i such that feature_vectors[i] is in cluster j) # Cluster 0 indicates no disease indices_for_label = map_labels_to_example_indices(label_vectors) logging.info('...done.') logging.info('Computing global and within-cluster average distances') # Compute average distance between vectors overall global_average_distance = average_distance_between_vectors(feature_vectors, distance) logging.info('Global average ' + distance.__name__ + ' between vectors: ' + str(global_average_distance)) # Compute average distance within each cluster for j, vector_indices in indices_for_label.items(): vectors_in_cluster = [feature_vectors[index] for index in vector_indices] average_cluster_distance = average_distance_between_vectors(vectors_in_cluster, distance) logging.info('Average ' + distance.__name__ + ' between vectors in cluster ' + str(j) + ': ' + str(average_cluster_distance))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os.path.isfile(label_file_name):\n prettyPrint(\"Feature vector files {0} could not be found. Generating from scratch instead ...\".format(feat_file_name), color.CYAN)\n return None, None\n with open(feat_file_name, 'r') as f:\n feat_vec = pickle.load(f)\n with open(label_file_name, 'r') as f:\n labels = pickle.load(f)\n\n prettyPrint (\"Done loading feature vectors.\", color.CYAN)\n return feat_vec, labels", "def sent_or_doc_cluster(file_in, file_out, feature, method, n_cluster, show_or_write):\n\n original_file = file_in[0]\n original_words_file = file_in[1]\n file_vec = file_in[2]\n\n if feature.lower() == 'onehot':\n with open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, onehot in id_onehot.items():\n x.append(onehot.tolist())\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_cluster=n_cluster).fit(X)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n pass\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n\n elif feature.lower() == 'vec':\n with open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, vec in id_vec.items():\n x.append(vec.tolist()) # int object jas nor attribute 'tolist'\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_clusters=n_cluster).fit(X)\n else:\n raise ValueError(\"Method must be 'ap' or \"\n \"'kmeans'. Got %s instead\"\n % method)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n elif feature.lower() == 'doc2vec':\n # word2vec.doc2vec\n pass\n else:\n raise ValueError(\n \"Feature must be 'onehot' or 'vec' or 'doc2vec'. 
Got %s instead\" % feature)\n pass\n\n pass", "def print_clusters(vectors, labels, nclusters, show=False):\n plt.figure(1)\n plt.clf()\n\n vecs2D = TSNE(n_components=2).fit_transform(vectors)\n\n colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n for k, col in zip(range(nclusters), colors):\n my_members = labels == k\n\n cluster_vecs2D = vecs2D[my_members, :]\n\n print(cluster_vecs2D)\n print(cluster_vecs2D[:,0])\n print(cluster_vecs2D[:,1])\n\n plt.scatter(cluster_vecs2D[:,0], \n cluster_vecs2D[:,1], \n c=col, \n label='cluster {}'.format(k))\n\n plt.title('Estimated clusters')\n plt.legend()\n\n if show:\n plt.show()\n\n cwd = os.getcwd()\n if not os.path.exists(cwd+\"/plots\"):\n os.makedirs(cwd+\"/plots\")\n plt.savefig(cwd+'/plots/clusters.png')", "def read_data(feature_file, label_file):", "def load_libsvm_file(file, labels_format=\"list\", sort_indices=False):\n if labels_format == 'list':\n labels, features = _load_libsvm_file_labels_list(file, sort_indices)\n return csr_matrix(features), labels\n elif labels_format == 'csr_matrix':\n labels, features = _load_libsvm_file_labels_csr_matrix(file, sort_indices)\n return csr_matrix(features), csr_matrix(labels)\n else:\n raise ValueError(\"Label format {} is not valid format\".format(labels_format))", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def load_glove_vec(fname):\n word_vecs = {}\n length = 0\n with open(fname, \"rb\") as f:\n for i, line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n word_vecs[word] = np.array(L[1:], dtype='float32')\n if length == 0:\n length = len(word_vecs[word])\n return word_vecs, length", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = 
f_list", "def load_glove_vectors(filename, vocab):\n dct = {}\n vectors = array.array('d')\n current_idx = 0\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n for _, line in enumerate(f):\n tokens = line.split(\" \")\n word = tokens[0]\n entries = tokens[1:]\n if not vocab or word in vocab:\n dct[word] = current_idx\n vectors.extend(float(x) for x in entries)\n current_idx += 1\n word_dim = len(entries)\n num_vectors = len(dct)\n return [np.array(vectors).reshape(num_vectors, word_dim), dct]", "def main():\n\n# The following codes loads the data set into a 2D np array called data\n\twith open('complete_data.csv') as features_file:\n\t\tcsv_reader = csv.DictReader(features_file, delimiter = ',')\n\t\tdata = []\n\t\tcounter = 0\n\t\tfor row in csv_reader:\n\t\t\tprint(\"csv_reader row:\", row)\n\t\t\t# if(counter == 20):\n\t\t\t# \tbreak\n\t\t\tcounter+=1\n\t\t\tcleaned_row = []\n\t\t\tcleaned_row.append(row['track'])\n\t\t\tcleaned_row.append(row['loudness'])\n\t\t\tcleaned_row.append(row['score'])\n\t\t\tdata.append(np.array(cleaned_row))\n\t\tdata = random.sample(list(data), 30)\n\t\tdata = np.array(data)\n\n\n\tX = []\n\tY = []\n\tcounter = 0\n\tfor row in data:\n\t\t# if(counter == 10):\n\t\t# \tbreak\n\t\t# counter+=1\n\t\tY.append(row[0])\n\t\tl = [float(i) for i in row[1:]]\n\t\tX.append(l)\n\tX = np.array(X)\n\tY = np.array(Y)\n\n\tcentroid_indices2,centroids2 = sk_learn_cluster(X,Y,3)\n\n\tplot_word_clusters(data, centroids2, centroid_indices2 )", "def _extractGloveVects():\n \n embeddings_index = {}\n\n with open(GLOVE_CORPUS_FILE) as f:\n for line in f:\n values = line.split()\n word = values[0].lower()\n if word not in _cachedStopWords:\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n return embeddings_index", "def cluster_shrinkage_clustering(from_file):\n points = read_points(from_file)\n shuffle(points)\n S = similarity_matrix(points, similarity_measure=euclidean_distance)\n A = cluster(S, k=10, max_iter=1000)\n labels = [np.argmax(p) for p in A]\n xs, ys = zip(*points)\n \n return xs, ys, labels", "def train_routine(training_file, output_folder):\n if output_folder[-1] != '/':\n output_folder += '/'\n\n svm_file = output_folder + 'svm.txt'\n centroid_file = output_folder + 'centroids.txt'\n ids_file = output_folder + 'ids.txt'\n\n surf = cv2.SURF(250, extended=False)\n categories = dict()\n ids = dict()\n id = 1\n features = list()\n\n print \"Extracting features\"\n for line in open(training_file):\n try:\n category, path = line.split(';')\n except:\n print \"Error: File not in proper format. 
Ensure: <category/class name>; <path to image of said category>\"\n sys.exit(0)\n path = path.strip()\n\n try:\n img = cv2.imread(path)\n #img = cv2.resize(img, (500, 500))\n except Exception as e:\n print e\n continue\n\n keypoints, descriptors = surf.detectAndCompute(img, None)\n\n if not category in categories:\n categories[category] = Category(label=category)\n ids[category] = id\n id += 1\n categories[category].add_feature(descriptors)\n\n #for category in categories:\n #f = categories[category].yield_features()\n ##features.extend(f)\n #for i in f:\n #features.extend(i)\n\n print \"Calculating centroids\"\n #np_features = numpy.array(features)\n #print \"Features: \", np_features.shape\n #centroids, labels = kmeans2(np_features, FEATURE_TYPES)\n centroids = helpers.loadObject(output_folder + 'centroids.txt')\n print centroids.shape\n\n print \"Forming bag of words\"\n X, Y = [], []\n for category in categories:\n categories[category].calc_bagofwords(centroids)\n for bow in categories[category].bagofwords:\n X.append(bow)\n Y.append(ids[category])\n print \"Fitting linear SVMs onto the bag of words\"\n lin_clf = svm.LinearSVC()\n lin_clf.fit(X, Y)\n\n helpers.saveObject(lin_clf, svm_file)\n helpers.saveObject(centroids, centroid_file)\n helpers.saveObject(ids, ids_file)", "def print_cluster_attributes(self, objects):\n print(\"\\n\")\n print((\"ClusterName\".ljust(35),\":\",objects.ClusterName.value()))\n print((\"Repository Disk\".ljust(35),\":\", \\\n objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value()))\n print(\"\\nNodes in the cluster :\\n-----------------------\")\n for Node in objects.Node.Node :\n print((\"HostName\".ljust(35),\":\",\\\n Node.HostName.value()))\n print((\"PartitionID\".ljust(35),\":\", \\\n Node.PartitionID.value()))\n print()", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def cluster_text(list_of_text):\n print(\"Clustering text info saved the clustering.txt\")\n vectorizer = TfidfVectorizer(stop_words=\"english\")\n transform = vectorizer.fit_transform(list_of_text)\n\n true_k = 70\n\n model = MiniBatchKMeans(n_clusters=true_k, init=\"k-means++\", max_iter=100, n_init=1)\n model.fit(transform)\n clusters = {}\n for i in model.labels_:\n if not i in clusters:\n clusters[i] = 1\n else:\n clusters[i] += 1\n\n order_centroids = model.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n with open(\"clustering.txt\", \"w+\") as f:\n f.write(\"Top terms per cluster:\\n\")\n for i in range(true_k):\n with open(\"clustering.txt\", \"a\") as f:\n f.write(f\"Cluster {i}\\n\")\n f.write(f\"Number of tweets in this cluster: {clusters[i]}\\n\")\n term_list = []\n for ind in order_centroids[i, :10]:\n with open(\"clustering.txt\", \"a\") as f:\n f.write(terms[ind] + \"\\n\")\n term_list.append(terms[ind] + \"\\n\")\n return model.labels_", "def load_glove_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n for i,line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n if word in vocab:\n word_vecs[word] = np.array(L[1:], dtype='float32')\n return word_vecs", "def 
clusters(self, *args, **kwargs):\n\n result, name = is_file(kwargs.get('value')[0])\n if result:\n jdata = load_file(name)\n dump = False\n else:\n url = self.base.format('file/clusters')\n if by_id:\n self.params['query'] = 'cluster:{0}'.format(kwargs.get('value')[0])\n else:\n self.params['date'] = name\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n\n if kwargs.get('return_raw'):\n return jdata\n\n if _check_error(jdata):\n return\n\n simple_list = (\n 'size_top200',\n 'num_clusters',\n )\n\n self.simple_print(jdata, simple_list, indent='\\n\\t')\n for key in simple_list:\n if jdata.get(key):\n self.print_key(key, indent='\\n\\t')\n print('\\n\\t', jdata.get(key))\n\n if jdata.get('clusters'):\n plist = [[]]\n for line in jdata['clusters']:\n plist.append(\n [line['label'], line['avg_positives'], line['id'], line['size']])\n\n pretty_print_special(\n plist,\n ['Label', 'AV Detections', 'Id', 'Size'],\n [40, 15, 80, 8],\n ['l', 'c', 'l', 'c'],\n kwargs.get('email_template')\n )\n\n if dump:\n jsondump(jdata, 'clusters_{0}'.format(name))", "def read_features_from_file(filename):\n\tf = np.loadtxt(filename)\n\treturn f[:,:4],f[:,4:] # feature locations, descriptors", "def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def main():\n arguments = docopt(__doc__, version='cluster_parameter_extractor 1.0 BETA')\n\n input_file = arguments['--input']\n output_file = arguments[\"--output\"]\n process_synthetic = arguments[\"--synthetic_peptides\"]\n\n # make sure the input file exists\n if not os.path.isfile(input_file):\n print(\"Error: Cannot find input file '\" + input_file + \"'\")\n sys.exit(1)\n\n # make sure the output file does not exist\n if os.path.isfile(output_file):\n print(\"Error: Output file exists '\" + output_file + \"'\")\n sys.exit(1)\n\n with open(output_file, \"w\") as OUT:\n # write the header\n OUT.write(\"id\\tprecursor_mz\\tav_charge\\tsize\\tidentified_spec_count\\tunidentified_spec_count\\t\"\n \"max_ratio\\tmax_il_ratio\\tprecursor_mz_range\\tsequences\\t\"\n \"max_sequence\\tmax_sequence_count\\tmax_sequence_mods\\t\"\n \"second_max_sequence\\tsecond_max_sequence_count\\tsecond_max_sequence_mods\\tn_input_files\\t\"\n \"max_consensus_peak_rel_tic\\tmax_consensus_peak_mz\")\n\n if process_synthetic:\n OUT.write(\"\\tsynth_count\\tsynth_ratio\\tsynth_max_sequence\")\n\n OUT.write(\"\\n\")\n\n # process the file\n parser = clustering_parser.ClusteringParser(input_file)\n\n for cluster in parser:\n cluster_line = process_cluster(cluster)\n OUT.write(cluster_line)\n\n # process synthetic peptides\n if process_synthetic:\n synth_line = process_synthetic_peptides(cluster)\n OUT.write(\"\\t\" + synth_line)\n\n OUT.write(\"\\n\")\n\n print(\"Results written to \" + output_file)", "def parse_tab_file_get_clusters(filename1,\r\n seq_db):\r\n cluster_file = open(filename1, \"r\")\r\n # dictionaries for keeping the counts\r\n member_in_cluster_to_count_dict = dict()\r\n species_in_cluster_count_dict = dict()\r\n names, names_abudance_removed = get_names_from_Seq_db(seq_db)\r\n\r\n # a way of keeping track of the iteration\r\n interation_count = int(0)\r\n # iterate through the file\r\n for line in cluster_file:\r\n interation_count += 1\r\n # call the func to split up the line\r\n cluster_line_split = parse_line(line.rstrip())\r\n if not cluster_line_split:\r\n # this could be 
a blank line or starts with #\r\n continue\r\n # call the function to get the number of\r\n # elements and species.\r\n members_count, \\\r\n species_count = count_element_in_cluster(cluster_line_split,\r\n names,\r\n names_abudance_removed)\r\n try:\r\n # if we have seen this count before,\r\n # then just add one to it.\r\n member_in_cluster_to_count_dict[members_count] += 1\r\n except KeyError:\r\n # not seen this before, set up a new dic element\r\n # and make the equal 1\r\n member_in_cluster_to_count_dict[members_count] = 1\r\n try:\r\n # if we have seen this count of species before,\r\n # then just add one to it.\r\n species_in_cluster_count_dict[species_count] += 1\r\n except KeyError:\r\n species_in_cluster_count_dict[species_count] = 1\r\n species_in_cluster_list, species_number_of_keys, species_max_val, \\\r\n species_vals_for_bar_chart = covert_dict_to_list_of_value(\r\n species_in_cluster_count_dict)\r\n\r\n # print (\"member_in_cluster_to_count_dict: \",\r\n # member_in_cluster_to_count_dict)\r\n member_in_cluster_list, member_number_of_keys, member_max_val, \\\r\n member_vals_for_bar_chart = covert_dict_to_list_of_value(\r\n member_in_cluster_to_count_dict)\r\n # plot_multi_bar_chart_graph\r\n plot_multi_bar_chart_graph(\"Barchart: database species clusters\",\r\n species_vals_for_bar_chart,\r\n \"Barchart: total members in all cluster\",\r\n member_vals_for_bar_chart,\r\n \"Barchart: cluster size\",\r\n member_vals_for_bar_chart,\r\n filename1)", "def extract_feature_vectors(file, dict):\n f = open(file, 'r')\n num_lines = 0\n\n for line in f:\n if(line.strip()):\n num_lines = num_lines + 1\n\n f.close()\n\n feature_matrix = np.zeros([num_lines, len(dict)])\n\n f = open(file, 'r')\n pos = 0\n\n for line in f:\n if(line.strip()):\n flist = extract_words(line)\n for word in flist:\n if(word in dict):\n feature_matrix[pos, dict.index(word)] = 1\n pos = pos + 1\n\n f.close()\n\n return feature_matrix", "def load_model(self, file=FILENAME, dim=DIMENSION, normalize=False):\n print(\"Loading pretrained Glove vectors from file {}\".format(FILENAME))\n self.dimension = dim\n self.normalize = normalize\n with open(file, \"r\", encoding=\"utf-8\") as textfile:\n self.num_tokens = count_lines(textfile)\n self.tokens_arr = [\"\" for i in range(self.num_tokens)]\n self.embeddings_mat = np.zeros((self.num_tokens, self.dimension))\n\n for idx, line in enumerate(textfile):\n line = line.split()\n token = ''.join(line[:-self.dimension])\n self.tokens_arr[idx] = token\n self.token_to_idx[token] = idx \n vec = list(map(float, line[-self.dimension:]))\n if self.normalize: \n # normalize the vectors as they are put into the matrix\n vec = vec / np.linalg.norm(vec)\n self.embeddings_mat[idx] = vec \n if (idx+1) % 200000 == 0:\n print(\" --{}% loaded.\".format(round(idx/self.num_tokens*100, 2)))\n print(\"Finished loading Glove model. 
{} vectors loaded\".format(self.num_tokens))", "def cluster_features(self):\n logger.info('Creating term-document matrix...')\n self._create_tdm()\n init_centroids = self.centroids_from_categories()\n\n # Cluster the features using specific centroids.\n logger.info('Clustering features...')\n self.kmeans = KMeans(init=init_centroids, n_init=1, max_iter=1, n_clusters=len(self.feature_categories))\n self.clusters = self.kmeans.fit_predict(self.tdm)\n\n # The feature vector maps key features (categories) to other features that occur in the same cluster.\n logger.info('Converting clusters to feature vectors...')\n feature_vectors = self.clusters_to_feature_vectors(category_features=list(self.feature_amenity_map.keys()))\n\n return feature_vectors", "def read_labelmap_vidor(labelmap_file):\n\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n\n with open('idx_to_pred.pkl', 'rb') as f:\n idx_to_pred = pickle.load(f)\n\n # with PathManager.open(labelmap_file, \"r\") as f:\n # import pdb; pdb.set_trace()\n # for line in f:\n # if line.startswith(\" name:\"):\n # name = line.split('\"')[1]\n # elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n # class_id = int(line.strip().split(\" \")[-1])\n # labelmap.append({\"id\": class_id, \"name\": name})\n # class_ids.add(class_id)\n # return labelmap, class_ids\n\n \"\"\"\n (Pdb) categories\n [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}, {'id': 4, 'name': 'dance'}, {'id': 5, 'name': 'fall down'}, {'id': 6, 'name': 'get up'}, {'id': 7, 'name': 'jump/leap'}, {'id': 8, 'name': 'lie/sleep'}, {'id': 9, 'name': 'martial art'}, {'id': 10, 'name': 'run/jog'}, {'id': 11, 'name': 'sit'}, {'id': 12, 'name': 'stand'}, {'id': 13, 'name': 'swim'}, {'id': 14, 'name': 'walk'}, {'id': 15, 'name': 'answer phone'}, {'id': 17, 'name': 'carry/hold (an object)'}, {'id': 20, 'name': 'climb (e.g., a mountain)'}, {'id': 22, 'name': 'close (e.g., a door, a box)'}, {'id': 24, 'name': 'cut'}, {'id': 26, 'name': 'dress/put on clothing'}, {'id': 27, 'name': 'drink'}, {'id': 28, 'name': 'drive (e.g., a car, a truck)'}, {'id': 29, 'name': 'eat'}, {'id': 30, 'name': 'enter'}, {'id': 34, 'name': 'hit (an object)'}, {'id': 36, 'name': 'lift/pick up'}, {'id': 37, 'name': 'listen (e.g., to music)'}, {'id': 38, 'name': 'open (e.g., a window, a car door)'}, {'id': 41, 'name': 'play musical instrument'}, {'id': 43, 'name': 'point to (an object)'}, {'id': 45, 'name': 'pull (an object)'}, {'id': 46, 'name': 'push (an object)'}, {'id': 47, 'name': 'put down'}, {'id': 48, 'name': 'read'}, {'id': 49, 'name': 'ride (e.g., a bike, a car, a horse)'}, {'id': 51, 'name': 'sail boat'}, {'id': 52, 'name': 'shoot'}, {'id': 54, 'name': 'smoke'}, {'id': 56, 'name': 'take a photo'}, {'id': 57, 'name': 'text on/look at a cellphone'}, {'id': 58, 'name': 'throw'}, {'id': 59, 'name': 'touch (an object)'}, {'id': 60, 'name': 'turn (e.g., a screwdriver)'}, {'id': 61, 'name': 'watch (e.g., TV)'}, {'id': 62, 'name': 'work on a computer'}, {'id': 63, 'name': 'write'}, {'id': 64, 'name': 'fight/hit (a person)'}, {'id': 65, 'name': 'give/serve (an object) to (a person)'}, {'id': 66, 'name': 'grab (a person)'}, {'id': 67, 'name': 'hand clap'}, {'id': 68, 'name': 'hand shake'}, {'id': 69, 'name': 'hand wave'}, {'id': 70, 'name': 'hug (a person)'}, {'id': 72, 'name': 'kiss (a person)'}, {'id': 73, 'name': 'lift (a person)'}, {'id': 74, 'name': 'listen to (a person)'}, {'id': 76, 'name': 'push (another person)'}, {'id': 77, 'name': 'sing to (e.g., self, a 
person, a group)'}, {'id': 78, 'name': 'take (an object) from (a person)'}, {'id': 79, 'name': 'talk to (e.g., self, a person, a group)'}, {'id': 80, 'name': 'watch (a person)'}]\n (Pdb) class_whitelist\n {1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20, 22, 24, 26, 27, 28, 29, 30, 34, 36, 37, 38, 41, 43, 45, 46, 47, 48, 49, 51, 52, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 79, 80}\n \"\"\"", "def _load_glove_vec(fname, vocab):\n print 'load glove...'\n word_vecs = {}\n cnt = 0\n l = open(fname,'r').readline()\n embedding_size = len(l.strip().split()) -1\n print 'embedding vector size: %d'%(embedding_size)\n with open(fname, \"r\") as f:\n for l in f:\n stemp = l.strip().split(' ',1)\n assert len(stemp) == 2\n word = stemp[0]\n if word in vocab:\n word_vecs[stemp[0]] = np.fromstring(' '.join(stemp[1:]),sep = ' ')\n cnt+=1\n if cnt%10000==0:\n print '%d lines...'%cnt\n return (word_vecs,embedding_size)", "def load_vector_dictionary():\n return read_word2vecs_from_file(VECTOR_FILE)", "def create_mallet_clusters(filename, num_clusters, vocab):\n # Words that appear in the MALLET clusters\n cluster_words = []\n # Clusters - each cluster is a list with entries in format (word, counts)\n clusters_with_counts = [None] * num_clusters\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n tokens = line.strip().split()\n # Extract word and highest count from MALLET file\n # Highest count has form i:j where i is the cluster id\n # and j is the number of counts\n word, highest_count = tokens[1:3]\n if word in vocab:\n cluster_words.append(word)\n cluster_idx, count = [int(s) for s in highest_count.split(':')]\n if clusters_with_counts[cluster_idx] is None:\n clusters_with_counts[cluster_idx] = [(word, count)]\n else:\n clusters_with_counts[cluster_idx].append((word, count))\n\n for c in clusters_with_counts:\n c.sort(key=lambda x: x[1], reverse=True)\n\n # Clusters with words only (sorted in descending count order)\n clusters_words_only = []\n for c in clusters_with_counts:\n clusters_words_only.append([x[0] for x in c])\n\n return clusters_words_only, clusters_counts, cluster_words" ]
[ "0.5931401", "0.58759993", "0.5818201", "0.5670178", "0.55925786", "0.5564548", "0.5553166", "0.5509292", "0.5509292", "0.5505033", "0.55044186", "0.5498727", "0.5490172", "0.54632276", "0.54203254", "0.54109955", "0.5390733", "0.5373782", "0.5334791", "0.5330502", "0.5268046", "0.5259798", "0.5255289", "0.52416337", "0.52404994", "0.5204905", "0.51949906", "0.519409", "0.5173208", "0.51686245" ]
0.65529263
0